From 8a7d822e45e370e67b29cc00fb1ceb727aaba120 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Thu, 24 Aug 2023 16:14:57 +0530 Subject: [PATCH 001/143] Add custom tag --- .../app/BuildPipelineRestHandler.go | 1 + .../pipelineConfig/CdWorfkflowRepository.go | 1 + .../pipelineConfig/CiPipelineMaterial.go | 2 +- .../pipelineConfig/CiPipelineRepository.go | 38 +++++++++- .../pipelineConfig/CiWorkflowRepository.go | 11 +++ pkg/bean/app.go | 7 ++ pkg/pipeline/CiCdPipelineOrchestrator.go | 76 +++++++++++++++++++ pkg/pipeline/CiService.go | 56 +++++++++++++- pkg/pipeline/PipelineBuilder.go | 6 ++ scripts/sql/166_custom_image_tag.down.sql | 6 ++ scripts/sql/166_custom_image_tag.up.sql | 15 ++++ 11 files changed, 216 insertions(+), 3 deletions(-) create mode 100644 scripts/sql/166_custom_image_tag.down.sql create mode 100644 scripts/sql/166_custom_image_tag.up.sql diff --git a/api/restHandler/app/BuildPipelineRestHandler.go b/api/restHandler/app/BuildPipelineRestHandler.go index 2908cf13dc..338fd36024 100644 --- a/api/restHandler/app/BuildPipelineRestHandler.go +++ b/api/restHandler/app/BuildPipelineRestHandler.go @@ -1055,6 +1055,7 @@ func (handler PipelineConfigRestHandlerImpl) GetCIPipelineById(w http.ResponseWr common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) return } + ciPipeline.DefaultTag = []string{"{git_hash}", "{ci_pipeline_id}", "{global_counter}"} common.WriteJsonResp(w, err, ciPipeline, http.StatusOK) } diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 1e83fba28d..7c041973dc 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -202,6 +202,7 @@ type CdWorkflowWithArtifact struct { WorkflowType string `json:"workflow_type,omitempty"` ExecutorType string `json:"executor_type,omitempty"` BlobStorageEnabled bool `json:"blobStorageEnabled"` + TargetImageURL 
bool `json:"target_image_location"` GitTriggers map[int]GitCommit `json:"gitTriggers"` CiMaterials []CiPipelineMaterialResponse `json:"ciMaterials"` ImageReleaseTags []*repository2.ImageTag `json:"imageReleaseTags"` diff --git a/internal/sql/repository/pipelineConfig/CiPipelineMaterial.go b/internal/sql/repository/pipelineConfig/CiPipelineMaterial.go index 65ade20d6a..bf0312d249 100644 --- a/internal/sql/repository/pipelineConfig/CiPipelineMaterial.go +++ b/internal/sql/repository/pipelineConfig/CiPipelineMaterial.go @@ -83,7 +83,7 @@ func (impl CiPipelineMaterialRepositoryImpl) GetById(id int) (*CiPipelineMateria func (impl CiPipelineMaterialRepositoryImpl) GetByPipelineId(id int) ([]*CiPipelineMaterial, error) { var ciPipelineMaterials []*CiPipelineMaterial err := impl.dbConnection.Model(&ciPipelineMaterials). - Column("ci_pipeline_material.*", "CiPipeline", "CiPipeline.CiTemplate", "CiPipeline.CiTemplate.GitMaterial", "CiPipeline.App", "CiPipeline.CiTemplate.DockerRegistry", "CiPipeline.CiTemplate.CiBuildConfig", "GitMaterial", "GitMaterial.GitProvider"). + Column("ci_pipeline_material.*", "CiPipeline", "CiPipeline.CiTemplate", "CiPipeline.CiTemplate.GitMaterial", "CiPipeline.App", "CiPipeline.CiTemplate.DockerRegistry", "CiPipeline.CiTemplate.CiBuildConfig", "GitMaterial", "GitMaterial.GitProvider", "CiPipeline.CustomTagObject"). Where("ci_pipeline_material.ci_pipeline_id = ?", id). Where("ci_pipeline_material.active = ?", true). Where("ci_pipeline_material.type != ?", SOURCE_TYPE_BRANCH_REGEX). 
diff --git a/internal/sql/repository/pipelineConfig/CiPipelineRepository.go b/internal/sql/repository/pipelineConfig/CiPipelineRepository.go index d9a1e5666f..312645f1e0 100644 --- a/internal/sql/repository/pipelineConfig/CiPipelineRepository.go +++ b/internal/sql/repository/pipelineConfig/CiPipelineRepository.go @@ -47,6 +47,15 @@ type CiPipeline struct { sql.AuditLog CiPipelineMaterials []*CiPipelineMaterial CiTemplate *CiTemplate + CustomTagObject *CustomTagObject +} + +type CustomTagObject struct { + tableName struct{} `sql:"custom_tag" pg:",discard_unknown_columns"` + Id int `sql:"id,pk""` + CiPipelineId int `sql:"ci_pipeline_id" pg:",discard_unknown_columns"` + CustomTagFormat string `sql:"custom_tag_format" pg:",discard_unknown_columns"` + AutoIncreasingNumber int `sql:"auto_increasing_number" pg:",discard_unknown_columns"` } type CiEnvMapping struct { @@ -122,6 +131,11 @@ type CiPipelineRepository interface { FindAppIdsForCiPipelineIds(pipelineIds []int) (map[int]int, error) GetCiPipelineByArtifactId(artifactId int) (*CiPipeline, error) GetExternalCiPipelineByArtifactId(artifactId int) (*ExternalCiPipeline, error) + + InsertCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error + UpdateCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error + DeleteCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error + IncrementCustomTagCounter(customTagObjectId int) (*CustomTagObject, error) } type CiPipelineRepositoryImpl struct { dbConnection *pg.DB @@ -196,6 +210,28 @@ func (impl CiPipelineRepositoryImpl) MarkCiPipelineScriptsInactiveByCiPipelineId return nil } +func (impl CiPipelineRepositoryImpl) InsertCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error { + return tx.Insert(customTagObject) +} + +func (impl CiPipelineRepositoryImpl) DeleteCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error { + return tx.Delete(customTagObject) +} + +func (impl CiPipelineRepositoryImpl) UpdateCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) 
error { + return tx.Update(customTagObject) +} + +func (impl CiPipelineRepositoryImpl) IncrementCustomTagCounter(customTagObjectId int) (*CustomTagObject, error) { + customTagObject := &CustomTagObject{} + query := `update custom_tag set auto_increasing_number=auto_increasing_number+1 where id=? returning id, ci_pipeline_id, custom_tag_format, auto_increasing_number` + _, err := impl.dbConnection.Query(customTagObject, query, customTagObjectId) + if err != nil { + return nil, err + } + return customTagObject, nil +} + func (impl CiPipelineRepositoryImpl) FindByAppId(appId int) (pipelines []*CiPipeline, err error) { err = impl.dbConnection.Model(&pipelines). Column("ci_pipeline.*", "CiPipelineMaterials", "CiPipelineMaterials.GitMaterial"). @@ -282,7 +318,7 @@ func (impl CiPipelineRepositoryImpl) SaveCiPipelineScript(ciPipelineScript *CiPi func (impl CiPipelineRepositoryImpl) FindById(id int) (pipeline *CiPipeline, err error) { pipeline = &CiPipeline{Id: id} err = impl.dbConnection.Model(pipeline). - Column("ci_pipeline.*", "App", "CiPipelineMaterials", "CiTemplate", "CiTemplate.DockerRegistry", "CiPipelineMaterials.GitMaterial"). + Column("ci_pipeline.*", "App", "CiPipelineMaterials", "CiTemplate", "CiTemplate.DockerRegistry", "CiPipelineMaterials.GitMaterial", "CustomTagObject"). Where("ci_pipeline.id= ?", id). Where("ci_pipeline.deleted =? ", false). 
Select() diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index ae4a373f09..150a20f1c0 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -43,6 +43,7 @@ type CiWorkflowRepository interface { ExistsByStatus(status string) (bool, error) FindBuildTypeAndStatusDataOfLast1Day() []*BuildTypeCount FIndCiWorkflowStatusesByAppId(appId int) ([]*CiWorkflowStatus, error) + FindWorkFlowsByTargetImage(targetImage string) ([]*CiWorkflow, error) } type CiWorkflowRepositoryImpl struct { @@ -69,6 +70,7 @@ type CiWorkflow struct { PodName string `sql:"pod_name"` CiBuildType string `sql:"ci_build_type"` EnvironmentId int `sql:"environment_id"` + TargetImage string `sql:"target_image_location"` CiPipeline *CiPipeline } @@ -144,6 +146,15 @@ func NewCiWorkflowRepositoryImpl(dbConnection *pg.DB, logger *zap.SugaredLogger) } } +func (impl *CiWorkflowRepositoryImpl) FindWorkFlowsByTargetImage(targetImage string) ([]*CiWorkflow, error) { + var ciWorkFlows []*CiWorkflow + err := impl.dbConnection.Model(&ciWorkFlows). + Column("ci_workflow.*"). + Where("ci_workflow.target_image_location = ?", targetImage). + Select() + return ciWorkFlows, err +} + func (impl *CiWorkflowRepositoryImpl) FindLastTriggeredWorkflow(pipelineId int) (ciWorkflow *CiWorkflow, err error) { workflow := &CiWorkflow{} err = impl.dbConnection.Model(workflow). 
diff --git a/pkg/bean/app.go b/pkg/bean/app.go index ffdc2e1458..535b93afbd 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -118,6 +118,8 @@ type CiPipeline struct { DockerConfigOverride DockerConfigOverride `json:"dockerConfigOverride,omitempty"` EnvironmentId int `json:"environmentId,omitempty"` LastTriggeredEnvId int `json:"lastTriggeredEnvId"` + CustomTagObject *CustomTagData `json:"customTagObject,omitempty"` + DefaultTag []string `json:"defaultTag,omitempty"` } type DockerConfigOverride struct { @@ -219,6 +221,11 @@ type CiMaterialPatchRequest struct { Source *SourceTypeConfig `json:"source" validate:"required"` } +type CustomTagData struct { + TagPattern string `json:"tagPattern"` + CounterX int `json:"counterX"` +} + type CiPatchRequest struct { CiPipeline *CiPipeline `json:"ciPipeline"` AppId int `json:"appId,omitempty"` diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index 912a0e95b3..7def0caf14 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -254,6 +254,37 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. 
AuditLog: sql.AuditLog{UpdatedBy: userId, UpdatedOn: time.Now()}, } + if createRequest.CustomTagObject != nil { + err := validateCustomTagFormat(createRequest.CustomTagObject.TagPattern) + if err != nil { + return nil, err + } + if oldPipeline.CustomTagObject != nil { + ciPipelineObject.CustomTagObject = oldPipeline.CustomTagObject + ciPipelineObject.CustomTagObject.CustomTagFormat = createRequest.CustomTagObject.TagPattern + ciPipelineObject.CustomTagObject.AutoIncreasingNumber = createRequest.CustomTagObject.CounterX + err := impl.ciPipelineRepository.UpdateCustomTag(ciPipelineObject.CustomTagObject, tx) + if err != nil { + return nil, err + } + } else { + ciPipelineObject.CustomTagObject = &pipelineConfig.CustomTagObject{ + CiPipelineId: oldPipeline.Id, + CustomTagFormat: createRequest.CustomTagObject.TagPattern, + AutoIncreasingNumber: createRequest.CustomTagObject.CounterX, + } + err = impl.ciPipelineRepository.InsertCustomTag(ciPipelineObject.CustomTagObject, tx) + if err != nil { + return nil, err + } + } + } else { + err := impl.ciPipelineRepository.DeleteCustomTag(oldPipeline.CustomTagObject, tx) + if err != nil { + return nil, err + } + } + createOnTimeMap := make(map[int]time.Time) createByMap := make(map[int]int32) for _, oldMaterial := range oldPipeline.CiPipelineMaterials { @@ -665,6 +696,23 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCiConf(createRequest *bean.CiConf impl.logger.Errorw("error in saving pipeline", "ciPipelineObject", ciPipelineObject, "err", err) return nil, err } + + if ciPipeline.CustomTagObject != nil { + err = validateCustomTagFormat(ciPipeline.CustomTagObject.TagPattern) + if err != nil { + return nil, err + } + ciPipelineObject.CustomTagObject = &pipelineConfig.CustomTagObject{ + CiPipelineId: ciPipeline.Id, + CustomTagFormat: ciPipeline.CustomTagObject.TagPattern, + AutoIncreasingNumber: ciPipeline.CustomTagObject.CounterX, + } + err := impl.ciPipelineRepository.InsertCustomTag(ciPipelineObject.CustomTagObject, tx) + if 
err != nil { + return nil, err + } + } + if createRequest.IsJob { CiEnvMapping := &pipelineConfig.CiEnvMapping{ CiPipelineId: ciPipeline.Id, @@ -832,6 +880,34 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCiConf(createRequest *bean.CiConf return createRequest, nil } +func ValidateTag(imageTag string) error { + if len(imageTag) == 0 || len(imageTag) > 128 { + return fmt.Errorf("image tag should be of len 1-128 only, imageTag: %s", imageTag) + } + allowedSymbols := ".abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ-0987654321" + allowedCharSet := make(map[int32]struct{}) + for _, c := range allowedSymbols { + allowedCharSet[c] = struct{}{} + } + firstChar := imageTag[0:1] + if firstChar == "." || firstChar == "-" { + fmt.Errorf("image tag can not start with a period or a hyphen, imageTag: %s", imageTag) + } + return nil +} + +func validateCustomTagFormat(customTagPattern string) error { + allowedVariables := []string{"{x}", "{X}"} + totalX := 0 + for _, variable := range allowedVariables { + totalX += strings.Count(customTagPattern, variable) + } + if totalX != 1 { + return fmt.Errorf("variable {x} is allowed exactly once") + } + return nil +} + func (impl CiCdPipelineOrchestratorImpl) BuildCiPipelineScript(userId int32, ciScript *bean.CiScript, scriptStage string, ciPipeline *bean.CiPipeline) *pipelineConfig.CiPipelineScript { ciPipelineScript := &pipelineConfig.CiPipelineScript{ Name: ciScript.Name, diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index d900349597..c84a2ad6ff 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -172,6 +172,14 @@ func (impl *CiServiceImpl) TriggerCiPipeline(trigger Trigger) (int, error) { return 0, err } + savedCiWf.TargetImage = workflowRequest.DockerRegistryURL + "/" + workflowRequest.DockerRepository + ":" + workflowRequest.DockerImageTag + tagUsedStatuses := []string{pipelineConfig.WorkflowSucceeded} + tagReleasedStatuses := []string{pipelineConfig.WorkflowFailed, 
pipelineConfig.WorkflowAborted, string(v1alpha1.NodeError)} + err = impl.CanTargetImagePathBeReused(savedCiWf.TargetImage, tagReleasedStatuses, tagUsedStatuses) + if err != nil { + return 0, err + } + if impl.ciConfig != nil && impl.ciConfig.BuildxK8sDriverOptions != "" { err = impl.setBuildxK8sDriverData(workflowRequest) if err != nil { @@ -450,7 +458,27 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. return nil, err } } - dockerImageTag := impl.buildImageTag(commitHashes, pipeline.Id, savedWf.Id) + + var dockerImageTag string + if pipeline.CustomTagObject != nil { + customTagObjectLatest, err := impl.ciPipelineRepository.IncrementCustomTagCounter(pipeline.CustomTagObject.Id) + if err != nil { + return nil, err + } + err = validateCustomTagFormat(dockerImageTag) + if err != nil { + return nil, err + } + pipeline.CustomTagObject = customTagObjectLatest + dockerImageTag = strings.ReplaceAll(customTagObjectLatest.CustomTagFormat, "{x}", strconv.Itoa(customTagObjectLatest.AutoIncreasingNumber-1)) //-1 because number is already incremented, current value will be used next time + err = ValidateTag(dockerImageTag) + if err != nil { + return nil, err + } + } else { + dockerImageTag = impl.buildImageTag(commitHashes, pipeline.Id, savedWf.Id) + } + if ciWorkflowConfig.CiCacheBucket == "" { ciWorkflowConfig.CiCacheBucket = impl.ciConfig.DefaultCacheBucket } @@ -770,3 +798,29 @@ func _getTruncatedImageTag(imageTag string) string { } } + +func (impl *CiServiceImpl) CanTargetImagePathBeReused(targetImageURL string, tagUsedStatuses []string, tagReleasedStatuses []string) error { + allWfs, err := impl.ciWorkflowRepository.FindWorkFlowsByTargetImage(targetImageURL) + if err != nil && err != pg.ErrNoRows { + return err + } + for _, wf := range allWfs { + if arrayContains(tagUsedStatuses, wf.Status) { + return fmt.Errorf("image path is already used") + } else if arrayContains(tagReleasedStatuses, wf.Status) { + continue + } else { + return 
fmt.Errorf("image path tag is reserved") + } + } + return nil +} + +func arrayContains(arr []string, str string) bool { + for _, s := range arr { + if s == str { + return true + } + } + return false +} diff --git a/pkg/pipeline/PipelineBuilder.go b/pkg/pipeline/PipelineBuilder.go index f7c18c2f1e..48a28faeb7 100644 --- a/pkg/pipeline/PipelineBuilder.go +++ b/pkg/pipeline/PipelineBuilder.go @@ -4151,6 +4151,12 @@ func (impl *PipelineBuilderImpl) GetCiPipelineById(pipelineId int) (ciPipeline * ScanEnabled: pipeline.ScanEnabled, IsDockerConfigOverridden: pipeline.IsDockerConfigOverridden, } + if pipeline.CustomTagObject != nil { + ciPipeline.CustomTagObject = &bean.CustomTagData{ + TagPattern: pipeline.CustomTagObject.CustomTagFormat, + CounterX: pipeline.CustomTagObject.AutoIncreasingNumber, + } + } ciEnvMapping, err := impl.ciPipelineRepository.FindCiEnvMappingByCiPipelineId(pipelineId) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching ci env mapping", "pipelineId", pipelineId, "err", err) diff --git a/scripts/sql/166_custom_image_tag.down.sql b/scripts/sql/166_custom_image_tag.down.sql new file mode 100644 index 0000000000..38919016af --- /dev/null +++ b/scripts/sql/166_custom_image_tag.down.sql @@ -0,0 +1,6 @@ +ALTER TABLE ci_workflow + DROP COLUMN IF EXISTS target_image_location; + +DROP INDEX IF EXISTS target_image_path; + +DROP TABLE IF EXISTS custom_tag; diff --git a/scripts/sql/166_custom_image_tag.up.sql b/scripts/sql/166_custom_image_tag.up.sql new file mode 100644 index 0000000000..aa099e7734 --- /dev/null +++ b/scripts/sql/166_custom_image_tag.up.sql @@ -0,0 +1,15 @@ + +ALTER TABLE ci_workflow + ADD COLUMN IF NOT EXISTS target_image_location text default null; +CREATE INDEX IF NOT EXISTS target_image_path ON ci_workflow (target_image_location); + + +CREATE TABLE "public"."custom_tag" +( + id serial PRIMARY KEY, + ci_pipeline_id int NOT NULL UNIQUE, + custom_tag_format text, + auto_increasing_number int DEFAULT 0, + + FOREIGN KEY 
(ci_pipeline_id) REFERENCES ci_pipeline (id) +); \ No newline at end of file From 127ad00198496c7d9f4e505f14893d5b2c4157ba Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Thu, 24 Aug 2023 17:12:59 +0530 Subject: [PATCH 002/143] Refactor handle concurrency --- .../pipelineConfig/CiWorkflowRepository.go | 29 +++++++++++++++++-- pkg/pipeline/CiService.go | 19 +++++------- 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index 150a20f1c0..f10e31bf66 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -43,7 +43,8 @@ type CiWorkflowRepository interface { ExistsByStatus(status string) (bool, error) FindBuildTypeAndStatusDataOfLast1Day() []*BuildTypeCount FIndCiWorkflowStatusesByAppId(appId int) ([]*CiWorkflowStatus, error) - FindWorkFlowsByTargetImage(targetImage string) ([]*CiWorkflow, error) + FindWorkFlowsByTargetImage(targetImage string, tx *pg.Tx) ([]*CiWorkflow, error) + UpdateWorkFlowWithValidation(wf *CiWorkflow, fn func(tx *pg.Tx) error) error } type CiWorkflowRepositoryImpl struct { @@ -146,9 +147,9 @@ func NewCiWorkflowRepositoryImpl(dbConnection *pg.DB, logger *zap.SugaredLogger) } } -func (impl *CiWorkflowRepositoryImpl) FindWorkFlowsByTargetImage(targetImage string) ([]*CiWorkflow, error) { +func (impl *CiWorkflowRepositoryImpl) FindWorkFlowsByTargetImage(targetImage string, tx *pg.Tx) ([]*CiWorkflow, error) { var ciWorkFlows []*CiWorkflow - err := impl.dbConnection.Model(&ciWorkFlows). + err := tx.Model(&ciWorkFlows). Column("ci_workflow.*"). Where("ci_workflow.target_image_location = ?", targetImage). 
Select() @@ -229,6 +230,28 @@ func (impl *CiWorkflowRepositoryImpl) SaveWorkFlow(wf *CiWorkflow) error { return err } +func (impl *CiWorkflowRepositoryImpl) UpdateWorkFlowWithValidation(wf *CiWorkflow, fn func(tx *pg.Tx) error) error { + connection := impl.dbConnection + tx, err := connection.Begin() + defer tx.Rollback() + if err != nil { + return err + } + err = fn(tx) + if err != nil { + return err + } + err = tx.Update(wf) + if err != nil { + return err + } + err = tx.Commit() + if err != nil { + return err + } + return nil +} + func (impl *CiWorkflowRepositoryImpl) UpdateWorkFlow(wf *CiWorkflow) error { err := impl.dbConnection.Update(wf) return err diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index c84a2ad6ff..387261c284 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -172,14 +172,6 @@ func (impl *CiServiceImpl) TriggerCiPipeline(trigger Trigger) (int, error) { return 0, err } - savedCiWf.TargetImage = workflowRequest.DockerRegistryURL + "/" + workflowRequest.DockerRepository + ":" + workflowRequest.DockerImageTag - tagUsedStatuses := []string{pipelineConfig.WorkflowSucceeded} - tagReleasedStatuses := []string{pipelineConfig.WorkflowFailed, pipelineConfig.WorkflowAborted, string(v1alpha1.NodeError)} - err = impl.CanTargetImagePathBeReused(savedCiWf.TargetImage, tagReleasedStatuses, tagUsedStatuses) - if err != nil { - return 0, err - } - if impl.ciConfig != nil && impl.ciConfig.BuildxK8sDriverOptions != "" { err = impl.setBuildxK8sDriverData(workflowRequest) if err != nil { @@ -780,7 +772,12 @@ func (impl *CiServiceImpl) updateCiWorkflow(request *WorkflowRequest, savedWf *p ciBuildConfig := request.CiBuildConfig ciBuildType := string(ciBuildConfig.CiBuildType) savedWf.CiBuildType = ciBuildType - return impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) + savedWf.TargetImage = request.DockerRegistryURL + "/" + request.DockerRepository + ":" + request.DockerImageTag + tagUsedStatuses := 
[]string{pipelineConfig.WorkflowSucceeded} + tagReleasedStatuses := []string{pipelineConfig.WorkflowFailed, pipelineConfig.WorkflowAborted, string(v1alpha1.NodeError)} + return impl.ciWorkflowRepository.UpdateWorkFlowWithValidation(savedWf, func(tx *pg.Tx) error { + return impl.CanTargetImagePathBeReused(savedWf.TargetImage, tagReleasedStatuses, tagUsedStatuses, tx) + }) } func _getTruncatedImageTag(imageTag string) string { @@ -799,8 +796,8 @@ func _getTruncatedImageTag(imageTag string) string { } -func (impl *CiServiceImpl) CanTargetImagePathBeReused(targetImageURL string, tagUsedStatuses []string, tagReleasedStatuses []string) error { - allWfs, err := impl.ciWorkflowRepository.FindWorkFlowsByTargetImage(targetImageURL) +func (impl *CiServiceImpl) CanTargetImagePathBeReused(targetImageURL string, tagUsedStatuses []string, tagReleasedStatuses []string, tx *pg.Tx) error { + allWfs, err := impl.ciWorkflowRepository.FindWorkFlowsByTargetImage(targetImageURL, tx) if err != nil && err != pg.ErrNoRows { return err } From d5c610216c23231b447bed6d07d1da22a2222816 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Thu, 24 Aug 2023 18:13:39 +0530 Subject: [PATCH 003/143] Refactor --- .../pipelineConfig/CiWorkflowRepository.go | 85 ++++++++++--------- pkg/pipeline/CiService.go | 4 +- 2 files changed, 45 insertions(+), 44 deletions(-) diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index f10e31bf66..12ec4756ca 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -53,51 +53,52 @@ type CiWorkflowRepositoryImpl struct { } type CiWorkflow struct { - tableName struct{} `sql:"ci_workflow" pg:",discard_unknown_columns"` - Id int `sql:"id,pk"` - Name string `sql:"name"` - Status string `sql:"status"` - PodStatus string `sql:"pod_status"` - Message string `sql:"message"` - StartedOn time.Time 
`sql:"started_on"` - FinishedOn time.Time `sql:"finished_on"` - CiPipelineId int `sql:"ci_pipeline_id"` - Namespace string `sql:"namespace"` - BlobStorageEnabled bool `sql:"blob_storage_enabled,notnull"` - LogLocation string `sql:"log_file_path"` - GitTriggers map[int]GitCommit `sql:"git_triggers"` - TriggeredBy int32 `sql:"triggered_by"` - CiArtifactLocation string `sql:"ci_artifact_location"` - PodName string `sql:"pod_name"` - CiBuildType string `sql:"ci_build_type"` - EnvironmentId int `sql:"environment_id"` - TargetImage string `sql:"target_image_location"` - CiPipeline *CiPipeline + tableName struct{} `sql:"ci_workflow" pg:",discard_unknown_columns"` + Id int `sql:"id,pk"` + Name string `sql:"name"` + Status string `sql:"status"` + PodStatus string `sql:"pod_status"` + Message string `sql:"message"` + StartedOn time.Time `sql:"started_on"` + FinishedOn time.Time `sql:"finished_on"` + CiPipelineId int `sql:"ci_pipeline_id"` + Namespace string `sql:"namespace"` + BlobStorageEnabled bool `sql:"blob_storage_enabled,notnull"` + LogLocation string `sql:"log_file_path"` + GitTriggers map[int]GitCommit `sql:"git_triggers"` + TriggeredBy int32 `sql:"triggered_by"` + CiArtifactLocation string `sql:"ci_artifact_location"` + PodName string `sql:"pod_name"` + CiBuildType string `sql:"ci_build_type"` + EnvironmentId int `sql:"environment_id"` + TargetImageLocation string `sql:"target_image_location"` + CiPipeline *CiPipeline } type WorkflowWithArtifact struct { - Id int `json:"id"` - Name string `json:"name"` - PodName string `json:"podName"` - Status string `json:"status"` - PodStatus string `json:"pod_status"` - Message string `json:"message"` - StartedOn time.Time `json:"started_on"` - FinishedOn time.Time `json:"finished_on"` - CiPipelineId int `json:"ci_pipeline_id"` - Namespace string `json:"namespace"` - LogFilePath string `json:"log_file_path"` - GitTriggers map[int]GitCommit `json:"git_triggers"` - TriggeredBy int32 `json:"triggered_by"` - EmailId string 
`json:"email_id"` - Image string `json:"image"` - CiArtifactLocation string `json:"ci_artifact_location"` - CiArtifactId int `json:"ci_artifact_d"` - BlobStorageEnabled bool `json:"blobStorageEnabled"` - CiBuildType string `json:"ci_build_type"` - IsArtifactUploaded bool `json:"is_artifact_uploaded"` - EnvironmentId int `json:"environmentId"` - EnvironmentName string `json:"environmentName"` + Id int `json:"id"` + Name string `json:"name"` + PodName string `json:"podName"` + Status string `json:"status"` + PodStatus string `json:"pod_status"` + Message string `json:"message"` + StartedOn time.Time `json:"started_on"` + FinishedOn time.Time `json:"finished_on"` + CiPipelineId int `json:"ci_pipeline_id"` + Namespace string `json:"namespace"` + LogFilePath string `json:"log_file_path"` + GitTriggers map[int]GitCommit `json:"git_triggers"` + TriggeredBy int32 `json:"triggered_by"` + EmailId string `json:"email_id"` + Image string `json:"image"` + CiArtifactLocation string `json:"ci_artifact_location"` + CiArtifactId int `json:"ci_artifact_d"` + BlobStorageEnabled bool `json:"blobStorageEnabled"` + CiBuildType string `json:"ci_build_type"` + IsArtifactUploaded bool `json:"is_artifact_uploaded"` + EnvironmentId int `json:"environmentId"` + EnvironmentName string `json:"environmentName"` + TargetImageLocation string `json:"targetImageLocation"` } type GitCommit struct { diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 387261c284..9fadeab38e 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -772,11 +772,11 @@ func (impl *CiServiceImpl) updateCiWorkflow(request *WorkflowRequest, savedWf *p ciBuildConfig := request.CiBuildConfig ciBuildType := string(ciBuildConfig.CiBuildType) savedWf.CiBuildType = ciBuildType - savedWf.TargetImage = request.DockerRegistryURL + "/" + request.DockerRepository + ":" + request.DockerImageTag + savedWf.TargetImageLocation = request.DockerRegistryURL + "/" + request.DockerRepository + ":" + 
request.DockerImageTag tagUsedStatuses := []string{pipelineConfig.WorkflowSucceeded} tagReleasedStatuses := []string{pipelineConfig.WorkflowFailed, pipelineConfig.WorkflowAborted, string(v1alpha1.NodeError)} return impl.ciWorkflowRepository.UpdateWorkFlowWithValidation(savedWf, func(tx *pg.Tx) error { - return impl.CanTargetImagePathBeReused(savedWf.TargetImage, tagReleasedStatuses, tagUsedStatuses, tx) + return impl.CanTargetImagePathBeReused(savedWf.TargetImageLocation, tagReleasedStatuses, tagUsedStatuses, tx) }) } From c55939341a931368efbf761bbe9b3d4377fbfaa7 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Fri, 25 Aug 2023 09:59:18 +0530 Subject: [PATCH 004/143] Update workflow when trigger fails due to tag conflict --- pkg/pipeline/CiService.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 9fadeab38e..417cd4a4fe 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -72,6 +72,10 @@ type CiServiceImpl struct { appRepository appRepository.AppRepository } +var ( + ImagePathUnavailable = fmt.Errorf("image path tag is reserved/reserved") +) + func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, ciConfig *CiConfig, eventClient client.EventClient, @@ -775,9 +779,15 @@ func (impl *CiServiceImpl) updateCiWorkflow(request *WorkflowRequest, savedWf *p savedWf.TargetImageLocation = request.DockerRegistryURL + "/" + request.DockerRepository + ":" + request.DockerImageTag tagUsedStatuses := []string{pipelineConfig.WorkflowSucceeded} tagReleasedStatuses := []string{pipelineConfig.WorkflowFailed, pipelineConfig.WorkflowAborted, string(v1alpha1.NodeError)} - return impl.ciWorkflowRepository.UpdateWorkFlowWithValidation(savedWf, func(tx *pg.Tx) error { + err := 
impl.ciWorkflowRepository.UpdateWorkFlowWithValidation(savedWf, func(tx *pg.Tx) error { return impl.CanTargetImagePathBeReused(savedWf.TargetImageLocation, tagReleasedStatuses, tagUsedStatuses, tx) }) + if err == ImagePathUnavailable { + savedWf.Status = pipelineConfig.WorkflowAborted + savedWf.Message = err.Error() + return impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) + } + return nil } func _getTruncatedImageTag(imageTag string) string { @@ -803,11 +813,11 @@ func (impl *CiServiceImpl) CanTargetImagePathBeReused(targetImageURL string, tag } for _, wf := range allWfs { if arrayContains(tagUsedStatuses, wf.Status) { - return fmt.Errorf("image path is already used") + return ImagePathUnavailable } else if arrayContains(tagReleasedStatuses, wf.Status) { continue } else { - return fmt.Errorf("image path tag is reserved") + return ImagePathUnavailable } } return nil From 3dfde7a45ba13a02221436fe7f9cff67e2508555 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Fri, 25 Aug 2023 12:04:56 +0530 Subject: [PATCH 005/143] Fix error --- pkg/bean/app.go | 2 +- pkg/pipeline/CiCdPipelineOrchestrator.go | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/bean/app.go b/pkg/bean/app.go index 535b93afbd..488496961c 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -118,7 +118,7 @@ type CiPipeline struct { DockerConfigOverride DockerConfigOverride `json:"dockerConfigOverride,omitempty"` EnvironmentId int `json:"environmentId,omitempty"` LastTriggeredEnvId int `json:"lastTriggeredEnvId"` - CustomTagObject *CustomTagData `json:"customTagObject,omitempty"` + CustomTagObject *CustomTagData `json:"customTag,omitempty"` DefaultTag []string `json:"defaultTag,omitempty"` } diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index 7def0caf14..c580aee7a7 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -279,9 +279,11 @@ func (impl 
CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. } } } else { - err := impl.ciPipelineRepository.DeleteCustomTag(oldPipeline.CustomTagObject, tx) - if err != nil { - return nil, err + if oldPipeline.CustomTagObject != nil { + err := impl.ciPipelineRepository.DeleteCustomTag(oldPipeline.CustomTagObject, tx) + if err != nil { + return nil, err + } } } From 2768ea559b61cf5a5a071c1f2c0c8cbae33d3ddb Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Fri, 25 Aug 2023 17:13:50 +0530 Subject: [PATCH 006/143] temp --- internal/sql/repository/ImageTagRepository.go | 18 ++++++++++++++++++ pkg/pipeline/CiService.go | 1 - .../167_custom_image_tag_generalize.down.sql | 0 .../sql/167_custom_image_tag_generalize.up.sql | 8 ++++++++ 4 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 internal/sql/repository/ImageTagRepository.go create mode 100644 scripts/sql/167_custom_image_tag_generalize.down.sql create mode 100644 scripts/sql/167_custom_image_tag_generalize.up.sql diff --git a/internal/sql/repository/ImageTagRepository.go b/internal/sql/repository/ImageTagRepository.go new file mode 100644 index 0000000000..b38dea8e72 --- /dev/null +++ b/internal/sql/repository/ImageTagRepository.go @@ -0,0 +1,18 @@ +package repository + +import ( + "github.com/go-pg/pg" + "go.uber.org/zap" +) + +type ImageTagRepository interface { +} + +type ImageTagRepositoryImpl struct { + dbConnection *pg.DB + logger *zap.SugaredLogger +} + +func NewImageTagRepository(dbConnection *pg.DB, logger *zap.SugaredLogger) *ImageTagRepositoryImpl { + return &ImageTagRepositoryImpl{dbConnection: dbConnection, logger: logger} +} diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 417cd4a4fe..8f8abd434d 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -803,7 +803,6 @@ func _getTruncatedImageTag(imageTag string) string { } else { return imageTag[:_truncatedLength] } - } func (impl *CiServiceImpl) 
CanTargetImagePathBeReused(targetImageURL string, tagUsedStatuses []string, tagReleasedStatuses []string, tx *pg.Tx) error { diff --git a/scripts/sql/167_custom_image_tag_generalize.down.sql b/scripts/sql/167_custom_image_tag_generalize.down.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/sql/167_custom_image_tag_generalize.up.sql b/scripts/sql/167_custom_image_tag_generalize.up.sql new file mode 100644 index 0000000000..b4cad1cf18 --- /dev/null +++ b/scripts/sql/167_custom_image_tag_generalize.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE custom_tag + ADD COLUMN entity_key varchar(30); +ALTER TABLE custom_tag + ADD COLUMN entity_value varchar(100); + +ALTER TABLE custom_tag + ADD COLUMN metadata jsonb; + From e3da3b87471f53c4c7ff9184e33e08fa42dcb7cc Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Fri, 25 Aug 2023 20:05:39 +0530 Subject: [PATCH 007/143] Build history failure case --- .../pipelineConfig/CiPipelineRepository.go | 9 +++++++++ pkg/pipeline/CiHandler.go | 11 +++++++++++ pkg/pipeline/CiService.go | 16 ++++++++++------ 3 files changed, 30 insertions(+), 6 deletions(-) diff --git a/internal/sql/repository/pipelineConfig/CiPipelineRepository.go b/internal/sql/repository/pipelineConfig/CiPipelineRepository.go index 312645f1e0..815e0025da 100644 --- a/internal/sql/repository/pipelineConfig/CiPipelineRepository.go +++ b/internal/sql/repository/pipelineConfig/CiPipelineRepository.go @@ -132,6 +132,7 @@ type CiPipelineRepository interface { GetCiPipelineByArtifactId(artifactId int) (*CiPipeline, error) GetExternalCiPipelineByArtifactId(artifactId int) (*ExternalCiPipeline, error) + GetCustomTagByCiPipelineId(ciPipelineId int) (*CustomTagObject, error) InsertCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error UpdateCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error DeleteCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error @@ -149,6 +150,14 @@ func NewCiPipelineRepositoryImpl(dbConnection *pg.DB, logger 
*zap.SugaredLogger) } } +func (impl CiPipelineRepositoryImpl) GetCustomTagByCiPipelineId(ciPipelineId int) (*CustomTagObject, error) { + var customTag CustomTagObject + err := impl.dbConnection.Model(&customTag). + Where("ci_pipeline_id = ?", ciPipelineId). + Select() + return &customTag, err +} + func (impl CiPipelineRepositoryImpl) FindByParentCiPipelineId(parentCiPipelineId int) ([]*CiPipeline, error) { var ciPipelines []*CiPipeline err := impl.dbConnection.Model(&ciPipelines). diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index dca4644afd..50f82cd101 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -157,6 +157,7 @@ type WorkflowResponse struct { EnvironmentName string `json:"environmentName"` ImageReleaseTags []*repository2.ImageTag `json:"imageReleaseTags"` ImageComment *repository2.ImageComment `json:"imageComment"` + CustomTag *bean.CustomTagData `json:"customTag,omitempty"` } type GitTriggerInfoResponse struct { @@ -501,6 +502,16 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int EnvironmentId: w.EnvironmentId, EnvironmentName: w.EnvironmentName, } + if w.Message == ImageTagUnavailableMessage { + customTag, err := impl.ciPipelineRepository.GetCustomTagByCiPipelineId(w.CiPipelineId) + if err != nil { + return nil, err + } + wfResponse.CustomTag = &bean.CustomTagData{ + TagPattern: customTag.CustomTagFormat, + CounterX: customTag.AutoIncreasingNumber, + } + } if imageTagsDataMap[w.CiArtifactId] != nil { wfResponse.ImageReleaseTags = imageTagsDataMap[w.CiArtifactId] //if artifact is not yet created,empty list will be sent } diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 417cd4a4fe..f0494ff8d6 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -73,7 +73,11 @@ type CiServiceImpl struct { } var ( - ImagePathUnavailable = fmt.Errorf("image path tag is reserved/reserved") + ErrImagePathUnavailable = fmt.Errorf("image path tag is 
reserved/reserved") +) + +const ( + ImageTagUnavailableMessage = "Desired image tag already exists" ) func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService, @@ -782,9 +786,9 @@ func (impl *CiServiceImpl) updateCiWorkflow(request *WorkflowRequest, savedWf *p err := impl.ciWorkflowRepository.UpdateWorkFlowWithValidation(savedWf, func(tx *pg.Tx) error { return impl.CanTargetImagePathBeReused(savedWf.TargetImageLocation, tagReleasedStatuses, tagUsedStatuses, tx) }) - if err == ImagePathUnavailable { - savedWf.Status = pipelineConfig.WorkflowAborted - savedWf.Message = err.Error() + if err == ErrImagePathUnavailable { + savedWf.Status = pipelineConfig.WorkflowFailed + savedWf.Message = ImageTagUnavailableMessage return impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) } return nil @@ -813,11 +817,11 @@ func (impl *CiServiceImpl) CanTargetImagePathBeReused(targetImageURL string, tag } for _, wf := range allWfs { if arrayContains(tagUsedStatuses, wf.Status) { - return ImagePathUnavailable + return ErrImagePathUnavailable } else if arrayContains(tagReleasedStatuses, wf.Status) { continue } else { - return ImagePathUnavailable + return ErrImagePathUnavailable } } return nil From edd2d54c413733a094c910a93deeb3f32fc37f84 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Mon, 28 Aug 2023 17:57:14 +0530 Subject: [PATCH 008/143] Custom tag logic extracted --- Wire.go | 7 + api/bean/CustomTag.go | 16 ++ .../sql/repository/CustomTagRepository.go | 89 +++++++++++ internal/sql/repository/ImageTagRepository.go | 18 --- .../pipelineConfig/CdWorfkflowRepository.go | 1 - .../pipelineConfig/CiPipelineMaterial.go | 2 +- .../pipelineConfig/CiPipelineRepository.go | 47 +----- .../pipelineConfig/CiWorkflowRepository.go | 119 +++++--------- pkg/CustomTagService.go | 150 ++++++++++++++++++ pkg/pipeline/CiCdPipelineOrchestrator.go | 85 +++------- pkg/pipeline/CiHandler.go | 18 ++- pkg/pipeline/CiService.go | 77 +++------ .../167_custom_image_tag_generalize.up.sql | 7 
+- scripts/sql/168_image_path.down.sql | 0 scripts/sql/168_image_path.up.sql | 12 ++ wire_gen.go | 9 +- 16 files changed, 384 insertions(+), 273 deletions(-) create mode 100644 api/bean/CustomTag.go create mode 100644 internal/sql/repository/CustomTagRepository.go delete mode 100644 internal/sql/repository/ImageTagRepository.go create mode 100644 pkg/CustomTagService.go create mode 100644 scripts/sql/168_image_path.down.sql create mode 100644 scripts/sql/168_image_path.up.sql diff --git a/Wire.go b/Wire.go index 713a887aa7..00cdbbc218 100644 --- a/Wire.go +++ b/Wire.go @@ -75,6 +75,7 @@ import ( security2 "github.com/devtron-labs/devtron/internal/sql/repository/security" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/internal/util/ArgoUtil" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/app/status" "github.com/devtron-labs/devtron/pkg/appClone" @@ -245,6 +246,12 @@ func InitializeApp() (*App, error) { bulkAction.NewBulkUpdateServiceImpl, wire.Bind(new(bulkAction.BulkUpdateService), new(*bulkAction.BulkUpdateServiceImpl)), + repository.NewImageTagRepository, + wire.Bind(new(repository.ImageTagRepository), new(*repository.ImageTagRepositoryImpl)), + + pkg.NewCustomTagService, + wire.Bind(new(pkg.CustomTagService), new(*pkg.CustomTagServiceImpl)), + repository.NewGitProviderRepositoryImpl, wire.Bind(new(repository.GitProviderRepository), new(*repository.GitProviderRepositoryImpl)), pipeline.NewGitRegistryConfigImpl, diff --git a/api/bean/CustomTag.go b/api/bean/CustomTag.go new file mode 100644 index 0000000000..8d398be5be --- /dev/null +++ b/api/bean/CustomTag.go @@ -0,0 +1,16 @@ +package bean + +type CustomTag struct { + EntityKey int `json:"entityKey"` + EntityValue string `json:"entityValue"` + TagPattern string `json:"tagPattern"` + 
AutoIncreasingNumber int `json:"counterX"` + Metadata string `json:"metadata"` +} + +type CustomTagErrorResponse struct { + ConflictingArtifactPath string `json:"conflictingLink"` + TagPattern string `json:"tagPattern"` + AutoIncreasingNumber int `json:"counterX"` + Message string `json:"message"` +} diff --git a/internal/sql/repository/CustomTagRepository.go b/internal/sql/repository/CustomTagRepository.go new file mode 100644 index 0000000000..1f2b66df7c --- /dev/null +++ b/internal/sql/repository/CustomTagRepository.go @@ -0,0 +1,89 @@ +package repository + +import ( + "github.com/go-pg/pg" + "go.uber.org/zap" +) + +type CustomTag struct { + tableName struct{} `sql:"custom_tag" pg:",discard_unknown_columns"` + Id int `sql:"id"` + EntityKey int `sql:"entity_key"` + EntityValue string `sql:"entity_value"` + TagPattern string `sql:"tag_pattern"` + AutoIncreasingNumber int `sql:"auto_increasing_number"` + Metadata string `sql:"metadata"` +} + +type ImagePathReservation struct { + tableName struct{} `sql:"image_path_reservation" pg:",discard_unknown_columns"` + Id int `sql:"id"` + ImagePath string `sql:"image_path"` + CustomTagId int `sql:"custom_tag_id"` + active bool `sql:"active"` +} + +type ImageTagRepository interface { + GetConnection() *pg.DB + CreateImageTag(customTagData *CustomTag) error + FetchCustomTagData(entityType int, entityValue string) (*CustomTag, error) + IncrementAndFetchByEntityKeyAndValue(tx *pg.Tx, entityKey int, entityValue string) (*CustomTag, error) + FindByImagePath(tx *pg.Tx, path string) ([]*ImagePathReservation, error) + InsertImagePath(tx *pg.Tx, reservation ImagePathReservation) error + UpdateImageTag(customTag *CustomTag) error + DeleteByEntityKeyAndValue(entityKey int, entityValue string) error +} + +type ImageTagRepositoryImpl struct { + dbConnection *pg.DB + logger *zap.SugaredLogger +} + +func NewImageTagRepository(dbConnection *pg.DB, logger *zap.SugaredLogger) *ImageTagRepositoryImpl { + return 
&ImageTagRepositoryImpl{dbConnection: dbConnection, logger: logger} +} + +func (impl *ImageTagRepositoryImpl) GetConnection() *pg.DB { + return impl.dbConnection +} + +func (impl *ImageTagRepositoryImpl) CreateImageTag(customTagData *CustomTag) error { + return impl.dbConnection.Insert(customTagData) +} + +func (impl *ImageTagRepositoryImpl) UpdateImageTag(customTag *CustomTag) error { + return impl.dbConnection.Update(customTag) +} + +func (impl *ImageTagRepositoryImpl) DeleteByEntityKeyAndValue(entityKey int, entityValue string) error { + query := `delete from custom_tag where entity_key = ? and entity_value = ?` + _, err := impl.dbConnection.Exec(query, entityKey, entityValue) + return err +} + +func (impl *ImageTagRepositoryImpl) FetchCustomTagData(entityType int, entityValue string) (*CustomTag, error) { + var customTagData CustomTag + err := impl.dbConnection.Model(&customTagData). + Where("entity_key = ?", entityType). + Where("entity_value = ?", entityValue).Select() + return &customTagData, err +} + +func (impl *ImageTagRepositoryImpl) IncrementAndFetchByEntityKeyAndValue(tx *pg.Tx, entityKey int, entityValue string) (*CustomTag, error) { + var customTag CustomTag + query := `update custom_tag set auto_increasing_number=auto_increasing_number+1 where entity_key=? and entity_value=? returning id, tag_pattern, auto_increasing_number, metadata, entity_key, entity_value` + _, err := tx.Query(&customTag, query, entityKey, entityValue) + return &customTag, err +} + +func (impl *ImageTagRepositoryImpl) FindByImagePath(tx *pg.Tx, path string) ([]*ImagePathReservation, error) { + var imagePaths []*ImagePathReservation + err := tx.Model(&imagePaths). + Where("image_path = ?", path).
+ Where("active = ?", true).Select() + return imagePaths, err +} + +func (impl *ImageTagRepositoryImpl) InsertImagePath(tx *pg.Tx, reservation ImagePathReservation) error { + return tx.Insert(&reservation) +} diff --git a/internal/sql/repository/ImageTagRepository.go b/internal/sql/repository/ImageTagRepository.go deleted file mode 100644 index b38dea8e72..0000000000 --- a/internal/sql/repository/ImageTagRepository.go +++ /dev/null @@ -1,18 +0,0 @@ -package repository - -import ( - "github.com/go-pg/pg" - "go.uber.org/zap" -) - -type ImageTagRepository interface { -} - -type ImageTagRepositoryImpl struct { - dbConnection *pg.DB - logger *zap.SugaredLogger -} - -func NewImageTagRepository(dbConnection *pg.DB, logger *zap.SugaredLogger) *ImageTagRepositoryImpl { - return &ImageTagRepositoryImpl{dbConnection: dbConnection, logger: logger} -} diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 7c041973dc..1e83fba28d 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -202,7 +202,6 @@ type CdWorkflowWithArtifact struct { WorkflowType string `json:"workflow_type,omitempty"` ExecutorType string `json:"executor_type,omitempty"` BlobStorageEnabled bool `json:"blobStorageEnabled"` - TargetImageURL bool `json:"target_image_location"` GitTriggers map[int]GitCommit `json:"gitTriggers"` CiMaterials []CiPipelineMaterialResponse `json:"ciMaterials"` ImageReleaseTags []*repository2.ImageTag `json:"imageReleaseTags"` diff --git a/internal/sql/repository/pipelineConfig/CiPipelineMaterial.go b/internal/sql/repository/pipelineConfig/CiPipelineMaterial.go index bf0312d249..65ade20d6a 100644 --- a/internal/sql/repository/pipelineConfig/CiPipelineMaterial.go +++ b/internal/sql/repository/pipelineConfig/CiPipelineMaterial.go @@ -83,7 +83,7 @@ func (impl
CiPipelineMaterialRepositoryImpl) GetById(id int) (*CiPipelineMateria func (impl CiPipelineMaterialRepositoryImpl) GetByPipelineId(id int) ([]*CiPipelineMaterial, error) { var ciPipelineMaterials []*CiPipelineMaterial err := impl.dbConnection.Model(&ciPipelineMaterials). - Column("ci_pipeline_material.*", "CiPipeline", "CiPipeline.CiTemplate", "CiPipeline.CiTemplate.GitMaterial", "CiPipeline.App", "CiPipeline.CiTemplate.DockerRegistry", "CiPipeline.CiTemplate.CiBuildConfig", "GitMaterial", "GitMaterial.GitProvider", "CiPipeline.CustomTagObject"). + Column("ci_pipeline_material.*", "CiPipeline", "CiPipeline.CiTemplate", "CiPipeline.CiTemplate.GitMaterial", "CiPipeline.App", "CiPipeline.CiTemplate.DockerRegistry", "CiPipeline.CiTemplate.CiBuildConfig", "GitMaterial", "GitMaterial.GitProvider"). Where("ci_pipeline_material.ci_pipeline_id = ?", id). Where("ci_pipeline_material.active = ?", true). Where("ci_pipeline_material.type != ?", SOURCE_TYPE_BRANCH_REGEX). diff --git a/internal/sql/repository/pipelineConfig/CiPipelineRepository.go b/internal/sql/repository/pipelineConfig/CiPipelineRepository.go index 815e0025da..d9a1e5666f 100644 --- a/internal/sql/repository/pipelineConfig/CiPipelineRepository.go +++ b/internal/sql/repository/pipelineConfig/CiPipelineRepository.go @@ -47,15 +47,6 @@ type CiPipeline struct { sql.AuditLog CiPipelineMaterials []*CiPipelineMaterial CiTemplate *CiTemplate - CustomTagObject *CustomTagObject -} - -type CustomTagObject struct { - tableName struct{} `sql:"custom_tag" pg:",discard_unknown_columns"` - Id int `sql:"id,pk""` - CiPipelineId int `sql:"ci_pipeline_id" pg:",discard_unknown_columns"` - CustomTagFormat string `sql:"custom_tag_format" pg:",discard_unknown_columns"` - AutoIncreasingNumber int `sql:"auto_increasing_number" pg:",discard_unknown_columns"` } type CiEnvMapping struct { @@ -131,12 +122,6 @@ type CiPipelineRepository interface { FindAppIdsForCiPipelineIds(pipelineIds []int) (map[int]int, error) 
GetCiPipelineByArtifactId(artifactId int) (*CiPipeline, error) GetExternalCiPipelineByArtifactId(artifactId int) (*ExternalCiPipeline, error) - - GetCustomTagByCiPipelineId(ciPipelineId int) (*CustomTagObject, error) - InsertCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error - UpdateCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error - DeleteCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error - IncrementCustomTagCounter(customTagObjectId int) (*CustomTagObject, error) } type CiPipelineRepositoryImpl struct { dbConnection *pg.DB @@ -150,14 +135,6 @@ func NewCiPipelineRepositoryImpl(dbConnection *pg.DB, logger *zap.SugaredLogger) } } -func (impl CiPipelineRepositoryImpl) GetCustomTagByCiPipelineId(ciPipelineId int) (*CustomTagObject, error) { - var customTag CustomTagObject - err := impl.dbConnection.Model(&customTag). - Where("ci_pipeline_id = ?", ciPipelineId). - Select() - return &customTag, err -} - func (impl CiPipelineRepositoryImpl) FindByParentCiPipelineId(parentCiPipelineId int) ([]*CiPipeline, error) { var ciPipelines []*CiPipeline err := impl.dbConnection.Model(&ciPipelines). @@ -219,28 +196,6 @@ func (impl CiPipelineRepositoryImpl) MarkCiPipelineScriptsInactiveByCiPipelineId return nil } -func (impl CiPipelineRepositoryImpl) InsertCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error { - return tx.Insert(customTagObject) -} - -func (impl CiPipelineRepositoryImpl) DeleteCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error { - return tx.Delete(customTagObject) -} - -func (impl CiPipelineRepositoryImpl) UpdateCustomTag(customTagObject *CustomTagObject, tx *pg.Tx) error { - return tx.Update(customTagObject) -} - -func (impl CiPipelineRepositoryImpl) IncrementCustomTagCounter(customTagObjectId int) (*CustomTagObject, error) { - customTagObject := &CustomTagObject{} - query := `update custom_tag set auto_increasing_number=auto_increasing_number+1 where id=? 
returning id, ci_pipeline_id, custom_tag_format, auto_increasing_number` - _, err := impl.dbConnection.Query(customTagObject, query, customTagObjectId) - if err != nil { - return nil, err - } - return customTagObject, nil -} - func (impl CiPipelineRepositoryImpl) FindByAppId(appId int) (pipelines []*CiPipeline, err error) { err = impl.dbConnection.Model(&pipelines). Column("ci_pipeline.*", "CiPipelineMaterials", "CiPipelineMaterials.GitMaterial"). @@ -327,7 +282,7 @@ func (impl CiPipelineRepositoryImpl) SaveCiPipelineScript(ciPipelineScript *CiPi func (impl CiPipelineRepositoryImpl) FindById(id int) (pipeline *CiPipeline, err error) { pipeline = &CiPipeline{Id: id} err = impl.dbConnection.Model(pipeline). - Column("ci_pipeline.*", "App", "CiPipelineMaterials", "CiTemplate", "CiTemplate.DockerRegistry", "CiPipelineMaterials.GitMaterial", "CustomTagObject"). + Column("ci_pipeline.*", "App", "CiPipelineMaterials", "CiTemplate", "CiTemplate.DockerRegistry", "CiPipelineMaterials.GitMaterial"). Where("ci_pipeline.id= ?", id). Where("ci_pipeline.deleted =? ", false). 
Select() diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index 12ec4756ca..232942a46a 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -43,8 +43,6 @@ type CiWorkflowRepository interface { ExistsByStatus(status string) (bool, error) FindBuildTypeAndStatusDataOfLast1Day() []*BuildTypeCount FIndCiWorkflowStatusesByAppId(appId int) ([]*CiWorkflowStatus, error) - FindWorkFlowsByTargetImage(targetImage string, tx *pg.Tx) ([]*CiWorkflow, error) - UpdateWorkFlowWithValidation(wf *CiWorkflow, fn func(tx *pg.Tx) error) error } type CiWorkflowRepositoryImpl struct { @@ -53,52 +51,52 @@ type CiWorkflowRepositoryImpl struct { } type CiWorkflow struct { - tableName struct{} `sql:"ci_workflow" pg:",discard_unknown_columns"` - Id int `sql:"id,pk"` - Name string `sql:"name"` - Status string `sql:"status"` - PodStatus string `sql:"pod_status"` - Message string `sql:"message"` - StartedOn time.Time `sql:"started_on"` - FinishedOn time.Time `sql:"finished_on"` - CiPipelineId int `sql:"ci_pipeline_id"` - Namespace string `sql:"namespace"` - BlobStorageEnabled bool `sql:"blob_storage_enabled,notnull"` - LogLocation string `sql:"log_file_path"` - GitTriggers map[int]GitCommit `sql:"git_triggers"` - TriggeredBy int32 `sql:"triggered_by"` - CiArtifactLocation string `sql:"ci_artifact_location"` - PodName string `sql:"pod_name"` - CiBuildType string `sql:"ci_build_type"` - EnvironmentId int `sql:"environment_id"` - TargetImageLocation string `sql:"target_image_location"` - CiPipeline *CiPipeline + tableName struct{} `sql:"ci_workflow" pg:",discard_unknown_columns"` + Id int `sql:"id,pk"` + Name string `sql:"name"` + Status string `sql:"status"` + PodStatus string `sql:"pod_status"` + Message string `sql:"message"` + StartedOn time.Time `sql:"started_on"` + FinishedOn time.Time `sql:"finished_on"` + 
CiPipelineId int `sql:"ci_pipeline_id"` + Namespace string `sql:"namespace"` + BlobStorageEnabled bool `sql:"blob_storage_enabled,notnull"` + LogLocation string `sql:"log_file_path"` + GitTriggers map[int]GitCommit `sql:"git_triggers"` + TriggeredBy int32 `sql:"triggered_by"` + CiArtifactLocation string `sql:"ci_artifact_location"` + PodName string `sql:"pod_name"` + CiBuildType string `sql:"ci_build_type"` + EnvironmentId int `sql:"environment_id"` + ImagePathReservationId int `sql:"image_path_reservation_id"` + CiPipeline *CiPipeline } type WorkflowWithArtifact struct { - Id int `json:"id"` - Name string `json:"name"` - PodName string `json:"podName"` - Status string `json:"status"` - PodStatus string `json:"pod_status"` - Message string `json:"message"` - StartedOn time.Time `json:"started_on"` - FinishedOn time.Time `json:"finished_on"` - CiPipelineId int `json:"ci_pipeline_id"` - Namespace string `json:"namespace"` - LogFilePath string `json:"log_file_path"` - GitTriggers map[int]GitCommit `json:"git_triggers"` - TriggeredBy int32 `json:"triggered_by"` - EmailId string `json:"email_id"` - Image string `json:"image"` - CiArtifactLocation string `json:"ci_artifact_location"` - CiArtifactId int `json:"ci_artifact_d"` - BlobStorageEnabled bool `json:"blobStorageEnabled"` - CiBuildType string `json:"ci_build_type"` - IsArtifactUploaded bool `json:"is_artifact_uploaded"` - EnvironmentId int `json:"environmentId"` - EnvironmentName string `json:"environmentName"` - TargetImageLocation string `json:"targetImageLocation"` + Id int `json:"id"` + Name string `json:"name"` + PodName string `json:"podName"` + Status string `json:"status"` + PodStatus string `json:"pod_status"` + Message string `json:"message"` + StartedOn time.Time `json:"started_on"` + FinishedOn time.Time `json:"finished_on"` + CiPipelineId int `json:"ci_pipeline_id"` + Namespace string `json:"namespace"` + LogFilePath string `json:"log_file_path"` + GitTriggers map[int]GitCommit `json:"git_triggers"` + 
TriggeredBy int32 `json:"triggered_by"` + EmailId string `json:"email_id"` + Image string `json:"image"` + CiArtifactLocation string `json:"ci_artifact_location"` + CiArtifactId int `json:"ci_artifact_d"` + BlobStorageEnabled bool `json:"blobStorageEnabled"` + CiBuildType string `json:"ci_build_type"` + IsArtifactUploaded bool `json:"is_artifact_uploaded"` + EnvironmentId int `json:"environmentId"` + EnvironmentName string `json:"environmentName"` + ImagePathReservationId int `json:"image_path_reservation_id"` } type GitCommit struct { @@ -148,15 +146,6 @@ func NewCiWorkflowRepositoryImpl(dbConnection *pg.DB, logger *zap.SugaredLogger) } } -func (impl *CiWorkflowRepositoryImpl) FindWorkFlowsByTargetImage(targetImage string, tx *pg.Tx) ([]*CiWorkflow, error) { - var ciWorkFlows []*CiWorkflow - err := tx.Model(&ciWorkFlows). - Column("ci_workflow.*"). - Where("ci_workflow.target_image_location = ?", targetImage). - Select() - return ciWorkFlows, err -} - func (impl *CiWorkflowRepositoryImpl) FindLastTriggeredWorkflow(pipelineId int) (ciWorkflow *CiWorkflow, err error) { workflow := &CiWorkflow{} err = impl.dbConnection.Model(workflow). 
@@ -231,28 +220,6 @@ func (impl *CiWorkflowRepositoryImpl) SaveWorkFlow(wf *CiWorkflow) error { return err } -func (impl *CiWorkflowRepositoryImpl) UpdateWorkFlowWithValidation(wf *CiWorkflow, fn func(tx *pg.Tx) error) error { - connection := impl.dbConnection - tx, err := connection.Begin() - defer tx.Rollback() - if err != nil { - return err - } - err = fn(tx) - if err != nil { - return err - } - err = tx.Update(wf) - if err != nil { - return err - } - err = tx.Commit() - if err != nil { - return err - } - return nil -} - func (impl *CiWorkflowRepositoryImpl) UpdateWorkFlow(wf *CiWorkflow) error { err := impl.dbConnection.Update(wf) return err diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go new file mode 100644 index 0000000000..3923926163 --- /dev/null +++ b/pkg/CustomTagService.go @@ -0,0 +1,150 @@ +package pkg + +import ( + "fmt" + "github.com/devtron-labs/devtron/api/bean" + "github.com/devtron-labs/devtron/internal/sql/repository" + "github.com/go-pg/pg" + "go.uber.org/zap" + "strconv" + "strings" +) + +const ( + EntityTypeCiPipelineId = iota +) + +const ( + imagePathPattern = "%s/%s:%s" // dockerReg/dockerRepo:Tag + ImageTagUnavailableMessage = "Desired image tag already exists" +) + +var ( + ErrImagePathInUse = fmt.Errorf("image path is already being used by someone") +) + +type CustomTagService interface { + CreateOrUpdateCustomTag(tag *bean.CustomTag) error + GetCustomTagByEntityKeyAndValue(entityKey int, entityValue string) (*repository.CustomTag, error) + GenerateImagePath(entityKey int, entityValue string, dockerRegistryURL string, dockerRepo string) (*repository.ImagePathReservation, error) + DeleteCustomTagIfExists(tag bean.CustomTag) error +} + +type CustomTagServiceImpl struct { + Logger *zap.SugaredLogger + customTagRepository repository.ImageTagRepository +} + +func NewCustomTagService(logger *zap.SugaredLogger, customTagRepo repository.ImageTagRepository) *CustomTagServiceImpl { 
+ return &CustomTagServiceImpl{ + Logger: logger, + customTagRepository: customTagRepo, + } +} + +func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag *bean.CustomTag) error { + customTagData := repository.CustomTag{ + EntityKey: tag.EntityKey, + EntityValue: tag.EntityValue, + TagPattern: strings.ReplaceAll(tag.TagPattern, "{X}", "{x}"), + AutoIncreasingNumber: tag.AutoIncreasingNumber, + Metadata: tag.Metadata, + } + oldTagObject, err := impl.customTagRepository.FetchCustomTagData(customTagData.EntityKey, customTagData.EntityValue) + if err != nil && err != pg.ErrNoRows { + return err + } + if oldTagObject.Id == 0 { + return impl.customTagRepository.CreateImageTag(&customTagData) + } else { + customTagData.Id = oldTagObject.Id + return impl.customTagRepository.UpdateImageTag(&customTagData) + } +} + +func (impl *CustomTagServiceImpl) DeleteCustomTagIfExists(tag bean.CustomTag) error { + return impl.customTagRepository.DeleteByEntityKeyAndValue(tag.EntityKey, tag.EntityValue) +} + +func (impl *CustomTagServiceImpl) GetCustomTagByEntityKeyAndValue(entityKey int, entityValue string) (*repository.CustomTag, error) { + return impl.customTagRepository.FetchCustomTagData(entityKey, entityValue) +} + +func (impl *CustomTagServiceImpl) GenerateImagePath(entityKey int, entityValue string, dockerRegistryURL string, dockerRepo string) (*repository.ImagePathReservation, error) { + connection := impl.customTagRepository.GetConnection() + tx, err := connection.Begin() + if err != nil { + return nil, err + } + defer tx.Rollback() + customTagData, err := impl.customTagRepository.IncrementAndFetchByEntityKeyAndValue(tx, entityKey, entityValue) + if err != nil { + return nil, err + } + tag, err := validateAndConstructTag(customTagData) + if err != nil { + return nil, err + } + imagePath := fmt.Sprintf(imagePathPattern, dockerRegistryURL, dockerRepo, tag) + imagePathReservations, err := impl.customTagRepository.FindByImagePath(tx, imagePath) + if err != nil && err != pg.ErrNoRows { + return nil,
err + } + if len(imagePathReservations) > 0 { + return nil, ErrImagePathInUse + } + imagePathReservation := repository.ImagePathReservation{ + ImagePath: imagePath, + CustomTagId: customTagData.Id, + } + err = impl.customTagRepository.InsertImagePath(tx, imagePathReservation) + if err != nil { + return nil, err + } + err = tx.Commit() + if err != nil { + return nil, err + } + return &imagePathReservation, nil +} + +func validateAndConstructTag(customTagData *repository.CustomTag) (string, error) { + err := validateTagPattern(customTagData.TagPattern) + if err != nil { + return "", err + } + dockerImageTag := strings.ReplaceAll(customTagData.TagPattern, "{x}", strconv.Itoa(customTagData.AutoIncreasingNumber-1)) //-1 because number is already incremented, current value will be used next time + err = validateTag(dockerImageTag) + if err != nil { + return "", err + } + return dockerImageTag, nil +} + +func validateTagPattern(customTagPattern string) error { + allowedVariables := []string{"{x}", "{X}"} + totalX := 0 + for _, variable := range allowedVariables { + totalX += strings.Count(customTagPattern, variable) + } + if totalX != 1 { + return fmt.Errorf("variable {x} is allowed exactly once") + } + return nil +} + +func validateTag(imageTag string) error { + if len(imageTag) == 0 || len(imageTag) > 128 { + return fmt.Errorf("image tag should be of len 1-128 only, imageTag: %s", imageTag) + } + allowedSymbols := ".abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ-0987654321" + allowedCharSet := make(map[int32]struct{}) + for _, c := range allowedSymbols { + allowedCharSet[c] = struct{}{} + } + firstChar := imageTag[0:1] + if firstChar == "." 
|| firstChar == "-" { + return fmt.Errorf("image tag can not start with a period or a hyphen, imageTag: %s", imageTag) + } + return nil +} diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index c580aee7a7..225014c218 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -26,10 +26,12 @@ import ( "encoding/json" "errors" "fmt" + bean4 "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/client/gitSensor" app2 "github.com/devtron-labs/devtron/internal/sql/repository/app" dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/helper" + "github.com/devtron-labs/devtron/pkg" repository2 "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/genericNotes" repository3 "github.com/devtron-labs/devtron/pkg/genericNotes/repository" @@ -110,6 +112,7 @@ type CiCdPipelineOrchestratorImpl struct { dockerArtifactStoreRepository dockerRegistryRepository.DockerArtifactStoreRepository configMapService ConfigMapService genericNoteService genericNotes.GenericNoteService + customTagService pkg.CustomTagService } func NewCiCdPipelineOrchestrator( @@ -135,6 +138,7 @@ func NewCiCdPipelineOrchestrator( ciTemplateService CiTemplateService, dockerArtifactStoreRepository dockerRegistryRepository.DockerArtifactStoreRepository, configMapService ConfigMapService, + customTagService pkg.CustomTagService, genericNoteService genericNotes.GenericNoteService) *CiCdPipelineOrchestratorImpl { return &CiCdPipelineOrchestratorImpl{ appRepository: pipelineGroupRepository, @@ -161,6 +165,7 @@ func NewCiCdPipelineOrchestrator( dockerArtifactStoreRepository: dockerArtifactStoreRepository, 
configMapService: configMapService, genericNoteService: genericNoteService, + customTagService: customTagService, } } @@ -255,35 +260,24 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. } if createRequest.CustomTagObject != nil { - err := validateCustomTagFormat(createRequest.CustomTagObject.TagPattern) + customTag := bean4.CustomTag{ + EntityKey: pkg.EntityTypeCiPipelineId, + EntityValue: strconv.Itoa(ciPipelineObject.Id), + TagPattern: createRequest.CustomTagObject.TagPattern, + AutoIncreasingNumber: createRequest.CustomTagObject.CounterX, + } + err = impl.customTagService.CreateOrUpdateCustomTag(&customTag) if err != nil { return nil, err } - if oldPipeline.CustomTagObject != nil { - ciPipelineObject.CustomTagObject = oldPipeline.CustomTagObject - ciPipelineObject.CustomTagObject.CustomTagFormat = createRequest.CustomTagObject.TagPattern - ciPipelineObject.CustomTagObject.AutoIncreasingNumber = createRequest.CustomTagObject.CounterX - err := impl.ciPipelineRepository.UpdateCustomTag(ciPipelineObject.CustomTagObject, tx) - if err != nil { - return nil, err - } - } else { - ciPipelineObject.CustomTagObject = &pipelineConfig.CustomTagObject{ - CiPipelineId: oldPipeline.Id, - CustomTagFormat: createRequest.CustomTagObject.TagPattern, - AutoIncreasingNumber: createRequest.CustomTagObject.CounterX, - } - err = impl.ciPipelineRepository.InsertCustomTag(ciPipelineObject.CustomTagObject, tx) - if err != nil { - return nil, err - } - } } else { - if oldPipeline.CustomTagObject != nil { - err := impl.ciPipelineRepository.DeleteCustomTag(oldPipeline.CustomTagObject, tx) - if err != nil { - return nil, err - } + customTag := bean4.CustomTag{ + EntityKey: pkg.EntityTypeCiPipelineId, + EntityValue: strconv.Itoa(ciPipelineObject.Id), + } + err := impl.customTagService.DeleteCustomTagIfExists(customTag) + if err != nil { + return nil, err } } @@ -700,16 +694,13 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCiConf(createRequest *bean.CiConf 
} if ciPipeline.CustomTagObject != nil { - err = validateCustomTagFormat(ciPipeline.CustomTagObject.TagPattern) - if err != nil { - return nil, err - } - ciPipelineObject.CustomTagObject = &pipelineConfig.CustomTagObject{ - CiPipelineId: ciPipeline.Id, - CustomTagFormat: ciPipeline.CustomTagObject.TagPattern, + customTag := &bean4.CustomTag{ + EntityKey: pkg.EntityTypeCiPipelineId, + EntityValue: strconv.Itoa(ciPipeline.Id), + TagPattern: ciPipeline.CustomTagObject.TagPattern, AutoIncreasingNumber: ciPipeline.CustomTagObject.CounterX, } - err := impl.ciPipelineRepository.InsertCustomTag(ciPipelineObject.CustomTagObject, tx) + err := impl.customTagService.CreateOrUpdateCustomTag(customTag) if err != nil { return nil, err } @@ -882,34 +873,6 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCiConf(createRequest *bean.CiConf return createRequest, nil } -func ValidateTag(imageTag string) error { - if len(imageTag) == 0 || len(imageTag) > 128 { - return fmt.Errorf("image tag should be of len 1-128 only, imageTag: %s", imageTag) - } - allowedSymbols := ".abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ-0987654321" - allowedCharSet := make(map[int32]struct{}) - for _, c := range allowedSymbols { - allowedCharSet[c] = struct{}{} - } - firstChar := imageTag[0:1] - if firstChar == "." 
|| firstChar == "-" { - fmt.Errorf("image tag can not start with a period or a hyphen, imageTag: %s", imageTag) - } - return nil -} - -func validateCustomTagFormat(customTagPattern string) error { - allowedVariables := []string{"{x}", "{X}"} - totalX := 0 - for _, variable := range allowedVariables { - totalX += strings.Count(customTagPattern, variable) - } - if totalX != 1 { - return fmt.Errorf("variable {x} is allowed exactly once") - } - return nil -} - func (impl CiCdPipelineOrchestratorImpl) BuildCiPipelineScript(userId int32, ciScript *bean.CiScript, scriptStage string, ciPipeline *bean.CiPipeline) *pipelineConfig.CiPipelineScript { ciPipelineScript := &pipelineConfig.CiPipelineScript{ Name: ciScript.Name, diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index 50f82cd101..6a6fba89ce 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -28,6 +28,7 @@ import ( bean2 "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/client/gitSensor" repository2 "github.com/devtron-labs/devtron/internal/sql/repository/imageTagging" + "github.com/devtron-labs/devtron/pkg" appGroup2 "github.com/devtron-labs/devtron/pkg/appGroup" "github.com/devtron-labs/devtron/pkg/cluster" repository3 "github.com/devtron-labs/devtron/pkg/cluster/repository" @@ -104,9 +105,10 @@ type CiHandlerImpl struct { appGroupService appGroup2.AppGroupService envRepository repository3.EnvironmentRepository imageTaggingService ImageTaggingService + customTagService pkg.CustomTagService } -func NewCiHandlerImpl(Logger *zap.SugaredLogger, ciService CiService, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, gitSensorClient gitSensor.Client, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, workflowService WorkflowService, ciLogService CiLogService, ciConfig *CiConfig, ciArtifactRepository 
repository.CiArtifactRepository, userService user.UserService, eventClient client.EventClient, eventFactory client.EventFactory, ciPipelineRepository pipelineConfig.CiPipelineRepository, appListingRepository repository.AppListingRepository, K8sUtil *k8s.K8sUtil, cdPipelineRepository pipelineConfig.PipelineRepository, enforcerUtil rbac.EnforcerUtil, appGroupService appGroup2.AppGroupService, envRepository repository3.EnvironmentRepository, imageTaggingService ImageTaggingService) *CiHandlerImpl { +func NewCiHandlerImpl(Logger *zap.SugaredLogger, ciService CiService, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, gitSensorClient gitSensor.Client, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, workflowService WorkflowService, ciLogService CiLogService, ciConfig *CiConfig, ciArtifactRepository repository.CiArtifactRepository, userService user.UserService, eventClient client.EventClient, eventFactory client.EventFactory, ciPipelineRepository pipelineConfig.CiPipelineRepository, appListingRepository repository.AppListingRepository, K8sUtil *k8s.K8sUtil, cdPipelineRepository pipelineConfig.PipelineRepository, enforcerUtil rbac.EnforcerUtil, appGroupService appGroup2.AppGroupService, envRepository repository3.EnvironmentRepository, imageTaggingService ImageTaggingService, customTagService pkg.CustomTagService) *CiHandlerImpl { return &CiHandlerImpl{ Logger: Logger, ciService: ciService, @@ -128,6 +130,7 @@ func NewCiHandlerImpl(Logger *zap.SugaredLogger, ciService CiService, ciPipeline appGroupService: appGroupService, envRepository: envRepository, imageTaggingService: imageTaggingService, + customTagService: customTagService, } } @@ -157,7 +160,7 @@ type WorkflowResponse struct { EnvironmentName string `json:"environmentName"` ImageReleaseTags []*repository2.ImageTag `json:"imageReleaseTags"` ImageComment *repository2.ImageComment `json:"imageComment"` - CustomTag *bean.CustomTagData `json:"customTag,omitempty"` + CustomTag 
*bean2.CustomTagErrorResponse `json:"customTag,omitempty"` } type GitTriggerInfoResponse struct { @@ -502,14 +505,15 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int EnvironmentId: w.EnvironmentId, EnvironmentName: w.EnvironmentName, } - if w.Message == ImageTagUnavailableMessage { - customTag, err := impl.ciPipelineRepository.GetCustomTagByCiPipelineId(w.CiPipelineId) + if w.Message == pkg.ImageTagUnavailableMessage { + customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(w.CiPipelineId)) if err != nil { return nil, err } - wfResponse.CustomTag = &bean.CustomTagData{ - TagPattern: customTag.CustomTagFormat, - CounterX: customTag.AutoIncreasingNumber, + wfResponse.CustomTag = &bean2.CustomTagErrorResponse{ + TagPattern: customTag.TagPattern, + AutoIncreasingNumber: customTag.AutoIncreasingNumber, + Message: pkg.ImageTagUnavailableMessage, } } if imageTagsDataMap[w.CiArtifactId] != nil { diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 2f36c62fce..266287f9ba 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -24,6 +24,7 @@ import ( appRepository "github.com/devtron-labs/devtron/internal/sql/repository/app" repository3 "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/helper" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" repository1 "github.com/devtron-labs/devtron/pkg/cluster/repository" bean2 "github.com/devtron-labs/devtron/pkg/pipeline/bean" @@ -70,16 +71,9 @@ type CiServiceImpl struct { appCrudOperationService app.AppCrudOperationService envRepository repository1.EnvironmentRepository appRepository appRepository.AppRepository + customTagService pkg.CustomTagService } -var ( - 
ErrImagePathUnavailable = fmt.Errorf("image path tag is reserved/reserved") -) - -const ( - ImageTagUnavailableMessage = "Desired image tag already exists" -) - func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, ciConfig *CiConfig, eventClient client.EventClient, @@ -87,6 +81,7 @@ func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService prePostCiScriptHistoryService history.PrePostCiScriptHistoryService, pipelineStageService PipelineStageService, userService user.UserService, + customTagService pkg.CustomTagService, ciTemplateService CiTemplateService, appCrudOperationService app.AppCrudOperationService, envRepository repository1.EnvironmentRepository, appRepository appRepository.AppRepository) *CiServiceImpl { return &CiServiceImpl{ Logger: Logger, @@ -105,6 +100,7 @@ func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService appCrudOperationService: appCrudOperationService, envRepository: envRepository, appRepository: appRepository, + customTagService: customTagService, } } @@ -460,21 +456,23 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
} var dockerImageTag string - if pipeline.CustomTagObject != nil { - customTagObjectLatest, err := impl.ciPipelineRepository.IncrementCustomTagCounter(pipeline.CustomTagObject.Id) - if err != nil { - return nil, err - } - err = validateCustomTagFormat(dockerImageTag) - if err != nil { - return nil, err - } - pipeline.CustomTagObject = customTagObjectLatest - dockerImageTag = strings.ReplaceAll(customTagObjectLatest.CustomTagFormat, "{x}", strconv.Itoa(customTagObjectLatest.AutoIncreasingNumber-1)) //-1 because number is already incremented, current value will be used next time - err = ValidateTag(dockerImageTag) + customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + if err != nil { + return nil, err + } + if customTag.Id == 0 { + imagePathReservation, err := impl.customTagService.GenerateImagePath(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id), pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository) if err != nil { + if errors.Is(err, pkg.ErrImagePathInUse) { + savedWf.Status = pipelineConfig.WorkflowFailed + savedWf.Message = pkg.ImageTagUnavailableMessage + return nil, impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) + } return nil, err } + savedWf.ImagePathReservationId = imagePathReservation.Id + //imagePath = docker.io/avd0/dashboard:fd23414b + dockerImageTag = strings.Split(imagePathReservation.ImagePath, ":")[1] } else { dockerImageTag = impl.buildImageTag(commitHashes, pipeline.Id, savedWf.Id) } @@ -780,18 +778,7 @@ func (impl *CiServiceImpl) updateCiWorkflow(request *WorkflowRequest, savedWf *p ciBuildConfig := request.CiBuildConfig ciBuildType := string(ciBuildConfig.CiBuildType) savedWf.CiBuildType = ciBuildType - savedWf.TargetImageLocation = request.DockerRegistryURL + "/" + request.DockerRepository + ":" + request.DockerImageTag - tagUsedStatuses := []string{pipelineConfig.WorkflowSucceeded} - tagReleasedStatuses := 
[]string{pipelineConfig.WorkflowFailed, pipelineConfig.WorkflowAborted, string(v1alpha1.NodeError)} - err := impl.ciWorkflowRepository.UpdateWorkFlowWithValidation(savedWf, func(tx *pg.Tx) error { - return impl.CanTargetImagePathBeReused(savedWf.TargetImageLocation, tagReleasedStatuses, tagUsedStatuses, tx) - }) - if err == ErrImagePathUnavailable { - savedWf.Status = pipelineConfig.WorkflowFailed - savedWf.Message = ImageTagUnavailableMessage - return impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) - } - return nil + return impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) } func _getTruncatedImageTag(imageTag string) string { @@ -808,29 +795,3 @@ func _getTruncatedImageTag(imageTag string) string { return imageTag[:_truncatedLength] } } - -func (impl *CiServiceImpl) CanTargetImagePathBeReused(targetImageURL string, tagUsedStatuses []string, tagReleasedStatuses []string, tx *pg.Tx) error { - allWfs, err := impl.ciWorkflowRepository.FindWorkFlowsByTargetImage(targetImageURL, tx) - if err != nil && err != pg.ErrNoRows { - return err - } - for _, wf := range allWfs { - if arrayContains(tagUsedStatuses, wf.Status) { - return ErrImagePathUnavailable - } else if arrayContains(tagReleasedStatuses, wf.Status) { - continue - } else { - return ErrImagePathUnavailable - } - } - return nil -} - -func arrayContains(arr []string, str string) bool { - for _, s := range arr { - if s == str { - return true - } - } - return false -} diff --git a/scripts/sql/167_custom_image_tag_generalize.up.sql b/scripts/sql/167_custom_image_tag_generalize.up.sql index b4cad1cf18..4ba7720556 100644 --- a/scripts/sql/167_custom_image_tag_generalize.up.sql +++ b/scripts/sql/167_custom_image_tag_generalize.up.sql @@ -1,8 +1,11 @@ ALTER TABLE custom_tag - ADD COLUMN entity_key varchar(30); + ADD COLUMN entity_key int; ALTER TABLE custom_tag ADD COLUMN entity_value varchar(100); +CREATE INDEX IF NOT EXISTS entity_key_value ON custom_tag (entity_key, entity_value); + ALTER TABLE custom_tag - ADD 
COLUMN metadata jsonb; + ADD CONSTRAINT constraint_name UNIQUE (entity_key, entity_value) + diff --git a/scripts/sql/168_image_path.down.sql b/scripts/sql/168_image_path.down.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/sql/168_image_path.up.sql b/scripts/sql/168_image_path.up.sql new file mode 100644 index 0000000000..18a6c769dd --- /dev/null +++ b/scripts/sql/168_image_path.up.sql @@ -0,0 +1,12 @@ +CREATE TABLE "public"."image_path_reservation"( + id serial PRIMARY KEY, + custom_tag_id int, + image_path text, + active boolean default true, + FOREIGN KEY (custom_tag_id) REFERENCES custom_tag (id) +); + +CREATE INDEX IF NOT EXISTS image_path_index ON image_path_reservation (image_path); + +ALTER TABLE ci_workflow + ADD column IF NOT EXISTS image_path_reservation_id int; \ No newline at end of file diff --git a/wire_gen.go b/wire_gen.go index 7663ccf8c1..841ea0eae4 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -63,6 +63,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/security" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/internal/util/ArgoUtil" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/apiToken" app2 "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/app/status" @@ -427,7 +428,9 @@ func InitializeApp() (*App, error) { ciBuildConfigServiceImpl := pipeline.NewCiBuildConfigServiceImpl(sugaredLogger, ciBuildConfigRepositoryImpl) ciTemplateServiceImpl := pipeline.NewCiTemplateServiceImpl(sugaredLogger, ciBuildConfigServiceImpl, ciTemplateRepositoryImpl, ciTemplateOverrideRepositoryImpl) configMapServiceImpl := pipeline.NewConfigMapServiceImpl(chartRepositoryImpl, sugaredLogger, chartRepoRepositoryImpl, utilMergeUtil, pipelineConfigRepositoryImpl, configMapRepositoryImpl, envConfigOverrideRepositoryImpl, 
commonServiceImpl, appRepositoryImpl, configMapHistoryServiceImpl, environmentRepositoryImpl) - ciCdPipelineOrchestratorImpl := pipeline.NewCiCdPipelineOrchestrator(appRepositoryImpl, sugaredLogger, materialRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciConfig, appWorkflowRepositoryImpl, environmentRepositoryImpl, attributesServiceImpl, appListingRepositoryImpl, appCrudOperationServiceImpl, userAuthServiceImpl, prePostCdScriptHistoryServiceImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, ciTemplateOverrideRepositoryImpl, gitMaterialHistoryServiceImpl, ciPipelineHistoryServiceImpl, ciTemplateServiceImpl, dockerArtifactStoreRepositoryImpl, configMapServiceImpl, genericNoteServiceImpl) + imageTagRepositoryImpl := repository.NewImageTagRepository(db, sugaredLogger) + customTagServiceImpl := pkg.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) + ciCdPipelineOrchestratorImpl := pipeline.NewCiCdPipelineOrchestrator(appRepositoryImpl, sugaredLogger, materialRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciConfig, appWorkflowRepositoryImpl, environmentRepositoryImpl, attributesServiceImpl, appListingRepositoryImpl, appCrudOperationServiceImpl, userAuthServiceImpl, prePostCdScriptHistoryServiceImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, ciTemplateOverrideRepositoryImpl, gitMaterialHistoryServiceImpl, ciPipelineHistoryServiceImpl, ciTemplateServiceImpl, dockerArtifactStoreRepositoryImpl, configMapServiceImpl, customTagServiceImpl, genericNoteServiceImpl) propertiesConfigServiceImpl := pipeline.NewPropertiesConfigServiceImpl(sugaredLogger, envConfigOverrideRepositoryImpl, chartRepositoryImpl, chartRefRepositoryImpl, utilMergeUtil, environmentRepositoryImpl, ciCdPipelineOrchestratorImpl, applicationServiceClientImpl, envLevelAppMetricsRepositoryImpl, appLevelMetricsRepositoryImpl, 
deploymentTemplateHistoryServiceImpl) ecrConfig, err := pipeline.GetEcrConfig() if err != nil { @@ -453,12 +456,12 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - ciServiceImpl := pipeline.NewCiServiceImpl(sugaredLogger, workflowServiceImpl, ciPipelineMaterialRepositoryImpl, ciWorkflowRepositoryImpl, ciConfig, eventRESTClientImpl, eventSimpleFactoryImpl, mergeUtil, ciPipelineRepositoryImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, userServiceImpl, ciTemplateServiceImpl, appCrudOperationServiceImpl, environmentRepositoryImpl, appRepositoryImpl) + ciServiceImpl := pipeline.NewCiServiceImpl(sugaredLogger, workflowServiceImpl, ciPipelineMaterialRepositoryImpl, ciWorkflowRepositoryImpl, ciConfig, eventRESTClientImpl, eventSimpleFactoryImpl, mergeUtil, ciPipelineRepositoryImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, userServiceImpl, customTagServiceImpl, ciTemplateServiceImpl, appCrudOperationServiceImpl, environmentRepositoryImpl, appRepositoryImpl) ciLogServiceImpl, err := pipeline.NewCiLogServiceImpl(sugaredLogger, ciServiceImpl, k8sUtil) if err != nil { return nil, err } - ciHandlerImpl := pipeline.NewCiHandlerImpl(sugaredLogger, ciServiceImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciWorkflowRepositoryImpl, workflowServiceImpl, ciLogServiceImpl, ciConfig, ciArtifactRepositoryImpl, userServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciPipelineRepositoryImpl, appListingRepositoryImpl, k8sUtil, pipelineRepositoryImpl, enforcerUtilImpl, appGroupServiceImpl, environmentRepositoryImpl, imageTaggingServiceImpl) + ciHandlerImpl := pipeline.NewCiHandlerImpl(sugaredLogger, ciServiceImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciWorkflowRepositoryImpl, workflowServiceImpl, ciLogServiceImpl, ciConfig, ciArtifactRepositoryImpl, userServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciPipelineRepositoryImpl, appListingRepositoryImpl, k8sUtil, pipelineRepositoryImpl, 
enforcerUtilImpl, appGroupServiceImpl, environmentRepositoryImpl, imageTaggingServiceImpl, customTagServiceImpl) gitRegistryConfigImpl := pipeline.NewGitRegistryConfigImpl(sugaredLogger, gitProviderRepositoryImpl, clientImpl) ociRegistryConfigRepositoryImpl := repository5.NewOCIRegistryConfigRepositoryImpl(db) dockerRegistryConfigImpl := pipeline.NewDockerRegistryConfigImpl(sugaredLogger, dockerArtifactStoreRepositoryImpl, dockerRegistryIpsConfigRepositoryImpl, ociRegistryConfigRepositoryImpl) From abd20e8a3a298893b6eacfd8ab634116f696cc8b Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Mon, 28 Aug 2023 18:10:05 +0530 Subject: [PATCH 009/143] Fix build --- pkg/pipeline/PipelineBuilder.go | 16 ++++++++++++---- wire_gen.go | 2 +- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/pkg/pipeline/PipelineBuilder.go b/pkg/pipeline/PipelineBuilder.go index 48a28faeb7..b19406f8b0 100644 --- a/pkg/pipeline/PipelineBuilder.go +++ b/pkg/pipeline/PipelineBuilder.go @@ -27,6 +27,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/appStatus" "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/internal/sql/repository/security" + "github.com/devtron-labs/devtron/pkg" appGroup2 "github.com/devtron-labs/devtron/pkg/appGroup" "github.com/devtron-labs/devtron/pkg/chart" chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" @@ -231,6 +232,7 @@ type PipelineBuilderImpl struct { attributesRepository repository.AttributesRepository securityConfig *SecurityConfig imageTaggingService ImageTaggingService + customTagService pkg.CustomTagService } func NewPipelineBuilderImpl(logger *zap.SugaredLogger, @@ -285,7 +287,8 @@ func NewPipelineBuilderImpl(logger *zap.SugaredLogger, chartDeploymentService util.ChartDeploymentService, K8sUtil *util4.K8sUtil, attributesRepository 
repository.AttributesRepository, - imageTaggingService ImageTaggingService) *PipelineBuilderImpl { + imageTaggingService ImageTaggingService, + customTagService pkg.CustomTagService) *PipelineBuilderImpl { securityConfig := &SecurityConfig{} err := env.Parse(securityConfig) if err != nil { @@ -354,6 +357,7 @@ func NewPipelineBuilderImpl(logger *zap.SugaredLogger, attributesRepository: attributesRepository, securityConfig: securityConfig, imageTaggingService: imageTaggingService, + customTagService: customTagService, } } @@ -4151,10 +4155,14 @@ func (impl *PipelineBuilderImpl) GetCiPipelineById(pipelineId int) (ciPipeline * ScanEnabled: pipeline.ScanEnabled, IsDockerConfigOverridden: pipeline.IsDockerConfigOverridden, } - if pipeline.CustomTagObject != nil { + customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + if err != nil { + return nil, err + } + if customTag.Id != 0 { ciPipeline.CustomTagObject = &bean.CustomTagData{ - TagPattern: pipeline.CustomTagObject.CustomTagFormat, - CounterX: pipeline.CustomTagObject.AutoIncreasingNumber, + TagPattern: customTag.TagPattern, + CounterX: customTag.AutoIncreasingNumber, } } ciEnvMapping, err := impl.ciPipelineRepository.FindCiEnvMappingByCiPipelineId(pipelineId) diff --git a/wire_gen.go b/wire_gen.go index 841ea0eae4..39eb842f5a 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -450,7 +450,7 @@ func InitializeApp() (*App, error) { chartDeploymentServiceImpl := util.NewChartDeploymentServiceImpl(sugaredLogger, repositoryServiceClientImpl) imageTaggingRepositoryImpl := repository11.NewImageTaggingRepositoryImpl(db) imageTaggingServiceImpl := pipeline.NewImageTaggingServiceImpl(imageTaggingRepositoryImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, environmentRepositoryImpl, sugaredLogger) - pipelineBuilderImpl := pipeline.NewPipelineBuilderImpl(sugaredLogger, ciCdPipelineOrchestratorImpl, dockerArtifactStoreRepositoryImpl, materialRepositoryImpl, 
appRepositoryImpl, pipelineRepositoryImpl, propertiesConfigServiceImpl, ciTemplateRepositoryImpl, ciPipelineRepositoryImpl, applicationServiceClientImpl, chartRepositoryImpl, ciArtifactRepositoryImpl, ecrConfig, envConfigOverrideRepositoryImpl, environmentRepositoryImpl, clusterRepositoryImpl, pipelineConfigRepositoryImpl, utilMergeUtil, appWorkflowRepositoryImpl, ciConfig, cdWorkflowRepositoryImpl, appServiceImpl, imageScanResultRepositoryImpl, argoK8sClientImpl, gitFactory, attributesServiceImpl, acdAuthConfig, gitOpsConfigRepositoryImpl, pipelineStrategyHistoryServiceImpl, prePostCiScriptHistoryServiceImpl, prePostCdScriptHistoryServiceImpl, deploymentTemplateHistoryServiceImpl, appLevelMetricsRepositoryImpl, pipelineStageServiceImpl, chartRefRepositoryImpl, chartTemplateServiceImpl, chartServiceImpl, helmAppServiceImpl, deploymentGroupRepositoryImpl, ciPipelineMaterialRepositoryImpl, userServiceImpl, ciTemplateServiceImpl, ciTemplateOverrideRepositoryImpl, gitMaterialHistoryServiceImpl, ciTemplateHistoryServiceImpl, ciPipelineHistoryServiceImpl, globalStrategyMetadataRepositoryImpl, globalStrategyMetadataChartRefMappingRepositoryImpl, pipelineDeploymentServiceTypeConfig, appStatusRepositoryImpl, workflowDagExecutorImpl, enforcerUtilImpl, argoUserServiceImpl, ciWorkflowRepositoryImpl, appGroupServiceImpl, chartDeploymentServiceImpl, k8sUtil, attributesRepositoryImpl, imageTaggingServiceImpl) + pipelineBuilderImpl := pipeline.NewPipelineBuilderImpl(sugaredLogger, ciCdPipelineOrchestratorImpl, dockerArtifactStoreRepositoryImpl, materialRepositoryImpl, appRepositoryImpl, pipelineRepositoryImpl, propertiesConfigServiceImpl, ciTemplateRepositoryImpl, ciPipelineRepositoryImpl, applicationServiceClientImpl, chartRepositoryImpl, ciArtifactRepositoryImpl, ecrConfig, envConfigOverrideRepositoryImpl, environmentRepositoryImpl, clusterRepositoryImpl, pipelineConfigRepositoryImpl, utilMergeUtil, appWorkflowRepositoryImpl, ciConfig, cdWorkflowRepositoryImpl, appServiceImpl, 
imageScanResultRepositoryImpl, argoK8sClientImpl, gitFactory, attributesServiceImpl, acdAuthConfig, gitOpsConfigRepositoryImpl, pipelineStrategyHistoryServiceImpl, prePostCiScriptHistoryServiceImpl, prePostCdScriptHistoryServiceImpl, deploymentTemplateHistoryServiceImpl, appLevelMetricsRepositoryImpl, pipelineStageServiceImpl, chartRefRepositoryImpl, chartTemplateServiceImpl, chartServiceImpl, helmAppServiceImpl, deploymentGroupRepositoryImpl, ciPipelineMaterialRepositoryImpl, userServiceImpl, ciTemplateServiceImpl, ciTemplateOverrideRepositoryImpl, gitMaterialHistoryServiceImpl, ciTemplateHistoryServiceImpl, ciPipelineHistoryServiceImpl, globalStrategyMetadataRepositoryImpl, globalStrategyMetadataChartRefMappingRepositoryImpl, pipelineDeploymentServiceTypeConfig, appStatusRepositoryImpl, workflowDagExecutorImpl, enforcerUtilImpl, argoUserServiceImpl, ciWorkflowRepositoryImpl, appGroupServiceImpl, chartDeploymentServiceImpl, k8sUtil, attributesRepositoryImpl, imageTaggingServiceImpl, customTagServiceImpl) dbMigrationServiceImpl := pipeline.NewDbMogrationService(sugaredLogger, dbMigrationConfigRepositoryImpl) workflowServiceImpl, err := pipeline.NewWorkflowServiceImpl(sugaredLogger, ciConfig, globalCMCSServiceImpl, appServiceImpl, configMapRepositoryImpl, k8sUtil, k8sCommonServiceImpl) if err != nil { From bd6b60316e99b27f9a86dc08e113e89c48ab55d9 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Mon, 28 Aug 2023 23:21:28 +0530 Subject: [PATCH 010/143] fix get query --- internal/sql/repository/CustomTagRepository.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/sql/repository/CustomTagRepository.go b/internal/sql/repository/CustomTagRepository.go index 1f2b66df7c..f0daa1e4d9 100644 --- a/internal/sql/repository/CustomTagRepository.go +++ b/internal/sql/repository/CustomTagRepository.go @@ -64,7 +64,7 @@ func (impl *ImageTagRepositoryImpl) DeleteByEntityKeyAndValue(entityKey int, ent func (impl *ImageTagRepositoryImpl) 
FetchCustomTagData(entityType int, entityValue string) (*CustomTag, error) { var customTagData CustomTag err := impl.dbConnection.Model(&customTagData). - Where("entity_type = ?", entityType). + Where("entity_key = ?", entityType). Where("entity_value = ?", entityValue).Select() return &customTagData, err } From f97fac5e01787549ae1a9935dfaf0bb6921c1dbf Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Mon, 28 Aug 2023 23:57:04 +0530 Subject: [PATCH 011/143] fix error handling --- pkg/CustomTagService.go | 2 +- pkg/pipeline/CiHandler.go | 2 +- pkg/pipeline/CiService.go | 2 +- pkg/pipeline/PipelineBuilder.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 3923926163..985c2686fc 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -51,7 +51,7 @@ func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag *bean.CustomTag) e Metadata: tag.Metadata, } oldTagObject, err := impl.customTagRepository.FetchCustomTagData(customTagData.EntityKey, customTagData.EntityValue) - if err != nil { + if err != nil && err != pg.ErrNoRows { return err } if oldTagObject.Id == 0 { diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index 6a6fba89ce..a60cfc1af1 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -507,7 +507,7 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int } if w.Message == pkg.ImageTagUnavailableMessage { customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(w.CiPipelineId)) - if err != nil { + if err != nil && err != pg.ErrNoRows { return nil, err } wfResponse.CustomTag = &bean2.CustomTagErrorResponse{ diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 266287f9ba..8688a3a981 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -457,7 +457,7 @@ func (impl *CiServiceImpl) 
buildWfRequestForCiPipeline(pipeline *pipelineConfig. var dockerImageTag string customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) - if err != nil { + if err != nil && err != pg.ErrNoRows { return nil, err } if customTag.Id == 0 { diff --git a/pkg/pipeline/PipelineBuilder.go b/pkg/pipeline/PipelineBuilder.go index b19406f8b0..200bec80ab 100644 --- a/pkg/pipeline/PipelineBuilder.go +++ b/pkg/pipeline/PipelineBuilder.go @@ -4156,7 +4156,7 @@ func (impl *PipelineBuilderImpl) GetCiPipelineById(pipelineId int) (ciPipeline * IsDockerConfigOverridden: pipeline.IsDockerConfigOverridden, } customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) - if err != nil { + if err != nil && err != pg.ErrNoRows { return nil, err } if customTag.Id != 0 { From 9ebfbb5f852a8ea28430efaf44e248d0292f0e3d Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 00:15:09 +0530 Subject: [PATCH 012/143] fix iota value --- pkg/CustomTagService.go | 3 ++- pkg/pipeline/CiHandler.go | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 985c2686fc..9ef69e49cb 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -11,7 +11,8 @@ import ( ) const ( - EntityTypeCiPipelineId = iota + EntityNull = iota + EntityTypeCiPipelineId ) const ( diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index a60cfc1af1..70e0bf0aff 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -508,6 +508,7 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int if w.Message == pkg.ImageTagUnavailableMessage { customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(w.CiPipelineId)) if err != nil && err != pg.ErrNoRows { + //err == pg.ErrNoRows should never happen return 
nil, err } wfResponse.CustomTag = &bean2.CustomTagErrorResponse{ From 0818862c3e3f174a4d4fdbf8f43b480016b393e9 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 00:53:16 +0530 Subject: [PATCH 013/143] Fix errors --- internal/sql/repository/CustomTagRepository.go | 6 +++--- pkg/CustomTagService.go | 4 ++-- pkg/pipeline/CiService.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/sql/repository/CustomTagRepository.go b/internal/sql/repository/CustomTagRepository.go index f0daa1e4d9..fe51d30bf9 100644 --- a/internal/sql/repository/CustomTagRepository.go +++ b/internal/sql/repository/CustomTagRepository.go @@ -29,7 +29,7 @@ type ImageTagRepository interface { FetchCustomTagData(entityType int, entityValue string) (*CustomTag, error) IncrementAndFetchByEntityKeyAndValue(tx *pg.Tx, entityKey int, entityValue string) (*CustomTag, error) FindByImagePath(tx *pg.Tx, path string) ([]*ImagePathReservation, error) - InsertImagePath(tx *pg.Tx, reservation ImagePathReservation) error + InsertImagePath(tx *pg.Tx, reservation *ImagePathReservation) error UpdateImageTag(customTag *CustomTag) error DeleteByEntityKeyAndValue(entityKey int, entityValue string) error } @@ -71,7 +71,7 @@ func (impl *ImageTagRepositoryImpl) FetchCustomTagData(entityType int, entityVal func (impl *ImageTagRepositoryImpl) IncrementAndFetchByEntityKeyAndValue(tx *pg.Tx, entityKey int, entityValue string) (*CustomTag, error) { var customTag CustomTag - query := `update custom_tag set auto_increasing_number=auto_increasing_number+1 where entity_key=? and entity_value=? returning id, custom_tag_format, auto_increasing_number, metadata, entity_key, entity_value` + query := `update custom_tag set auto_increasing_number=auto_increasing_number+1 where entity_key=? and entity_value=? 
returning id, tag_pattern, auto_increasing_number, entity_key, entity_value` _, err := tx.Query(&customTag, query, entityKey, entityValue) return &customTag, err } @@ -84,6 +84,6 @@ func (impl *ImageTagRepositoryImpl) FindByImagePath(tx *pg.Tx, path string) ([]* return imagePaths, err } -func (impl *ImageTagRepositoryImpl) InsertImagePath(tx *pg.Tx, reservation ImagePathReservation) error { +func (impl *ImageTagRepositoryImpl) InsertImagePath(tx *pg.Tx, reservation *ImagePathReservation) error { return tx.Insert(reservation) } diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 9ef69e49cb..50570f3a8f 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -94,7 +94,7 @@ func (impl *CustomTagServiceImpl) GenerateImagePath(entityKey int, entityValue s if len(imagePathReservations) > 0 { return nil, ErrImagePathInUse } - imagePathReservation := repository.ImagePathReservation{ + imagePathReservation := &repository.ImagePathReservation{ ImagePath: imagePath, CustomTagId: customTagData.Id, } @@ -106,7 +106,7 @@ func (impl *CustomTagServiceImpl) GenerateImagePath(entityKey int, entityValue s if err != nil { return nil, err } - return &imagePathReservation, nil + return imagePathReservation, nil } func validateAndConstructTag(customTagData *repository.CustomTag) (string, error) { diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 8688a3a981..05a99a480b 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -460,7 +460,7 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
if err != nil && err != pg.ErrNoRows { return nil, err } - if customTag.Id == 0 { + if customTag.Id != 0 { imagePathReservation, err := impl.customTagService.GenerateImagePath(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id), pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository) if err != nil { if errors.Is(err, pkg.ErrImagePathInUse) { From ef9af2c6802eba8b39fde35131b2df4f05198c2e Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 09:04:40 +0530 Subject: [PATCH 014/143] Return error when image path conflicts --- pkg/pipeline/CiService.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 05a99a480b..0f6bd20e38 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -466,7 +466,8 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. if errors.Is(err, pkg.ErrImagePathInUse) { savedWf.Status = pipelineConfig.WorkflowFailed savedWf.Message = pkg.ImageTagUnavailableMessage - return nil, impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) + impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) + return nil, err } return nil, err } From 7c672de96650d6a38d0b3ce580dd9b9aad7acde6 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 09:12:03 +0530 Subject: [PATCH 015/143] Handle errors --- internal/sql/repository/CustomTagRepository.go | 7 +++++++ pkg/CustomTagService.go | 5 +++++ pkg/pipeline/CiHandler.go | 4 ++++ 3 files changed, 16 insertions(+) diff --git a/internal/sql/repository/CustomTagRepository.go b/internal/sql/repository/CustomTagRepository.go index fe51d30bf9..f582b05c15 100644 --- a/internal/sql/repository/CustomTagRepository.go +++ b/internal/sql/repository/CustomTagRepository.go @@ -32,6 +32,7 @@ type ImageTagRepository interface { InsertImagePath(tx *pg.Tx, reservation *ImagePathReservation) error UpdateImageTag(customTag *CustomTag) error 
DeleteByEntityKeyAndValue(entityKey int, entityValue string) error + DeactivateImagePathReservation(id int) error } type ImageTagRepositoryImpl struct { @@ -61,6 +62,12 @@ func (impl *ImageTagRepositoryImpl) DeleteByEntityKeyAndValue(entityKey int, ent return err } +func (impl *ImageTagRepositoryImpl) DeactivateImagePathReservation(id int) error { + query := `update image_path_reservation set active=? where id=?` + _, err := impl.dbConnection.Exec(query, false, id) + return err +} + func (impl *ImageTagRepositoryImpl) FetchCustomTagData(entityType int, entityValue string) (*CustomTag, error) { var customTagData CustomTag err := impl.dbConnection.Model(&customTagData). diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 50570f3a8f..7395c37d1c 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -29,6 +29,7 @@ type CustomTagService interface { GetCustomTagByEntityKeyAndValue(entityKey int, entityValue string) (*repository.CustomTag, error) GenerateImagePath(entityKey int, entityValue string, dockerRegistryURL string, dockerRepo string) (*repository.ImagePathReservation, error) DeleteCustomTagIfExists(tag bean.CustomTag) error + DeactivateImagePathReservation(id int) error } type CustomTagServiceImpl struct { @@ -43,6 +44,10 @@ func NewCustomTagService(logger *zap.SugaredLogger, customTagRepo repository.Ima } } +func (impl *CustomTagServiceImpl) DeactivateImagePathReservation(id int) error { + return impl.customTagRepository.DeactivateImagePathReservation(id) +} + func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag *bean.CustomTag) error { customTagData := repository.CustomTag{ EntityKey: tag.EntityKey, diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index 70e0bf0aff..77b607aef2 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -511,6 +511,10 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int //err == pg.ErrNoRows should never happen return nil, err 
} + err = impl.customTagService.DeactivateImagePathReservation(w.ImagePathReservationId) + if err != nil { + return nil, err + } wfResponse.CustomTag = &bean2.CustomTagErrorResponse{ TagPattern: customTag.TagPattern, AutoIncreasingNumber: customTag.AutoIncreasingNumber, From 98cd35055ce877ef29f82cf15986bffedfaea689 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 09:27:35 +0530 Subject: [PATCH 016/143] Handle failed ci_workflow --- pkg/pipeline/CiHandler.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index 77b607aef2..aa2ce19619 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -1502,9 +1502,12 @@ func (impl *CiHandlerImpl) UpdateCiWorkflowStatusFailure(timeoutForFailureCiBuil err := impl.ciWorkflowRepository.UpdateWorkFlow(ciWorkflow) if err != nil { impl.Logger.Errorw("unable to update ci workflow, its eligible to mark failed", "err", err) - continue // skip this and process for next ci workflow } + err = impl.customTagService.DeactivateImagePathReservation(ciWorkflow.ImagePathReservationId) + if err != nil { + impl.Logger.Errorw("unable to update ci workflow, its eligible to mark failed", "err", err) + } } } return nil From 0b8efba5ff8ec8d6b9b6889d8ef95ee7ca8a0190 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 09:37:33 +0530 Subject: [PATCH 017/143] Handle failed ci_workflow --- pkg/pipeline/WebhookService.go | 11 +++++++++++ wire_gen.go | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index 0fb2c3ffe7..0a39eae07c 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -27,6 +27,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" util2 
"github.com/devtron-labs/devtron/internal/util" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/sql" "github.com/devtron-labs/devtron/util/event" @@ -66,6 +67,7 @@ type WebhookServiceImpl struct { eventFactory client.EventFactory workflowDagExecutor WorkflowDagExecutor ciHandler CiHandler + customTagService pkg.CustomTagService } func NewWebhookServiceImpl( @@ -75,6 +77,7 @@ func NewWebhookServiceImpl( appService app.AppService, eventClient client.EventClient, eventFactory client.EventFactory, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, + customTagService pkg.CustomTagService, workflowDagExecutor WorkflowDagExecutor, ciHandler CiHandler) *WebhookServiceImpl { return &WebhookServiceImpl{ ciArtifactRepository: ciArtifactRepository, @@ -86,6 +89,7 @@ func NewWebhookServiceImpl( ciWorkflowRepository: ciWorkflowRepository, workflowDagExecutor: workflowDagExecutor, ciHandler: ciHandler, + customTagService: customTagService, } } @@ -131,6 +135,13 @@ func (impl WebhookServiceImpl) HandleCiStepFailedEvent(ciPipelineId int, request return err } + go func() { + err := impl.customTagService.DeactivateImagePathReservation(savedWorkflow.ImagePathReservationId) + if err != nil { + impl.logger.Errorw("unable to deactivate impage_path_reservation ", err) + } + }() + go impl.WriteCIStepFailedEvent(pipeline, request, savedWorkflow) return nil } diff --git a/wire_gen.go b/wire_gen.go index 39eb842f5a..b4f16f5c63 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -517,7 +517,7 @@ func InitializeApp() (*App, error) { gitWebhookRepositoryImpl := repository.NewGitWebhookRepositoryImpl(db) gitWebhookServiceImpl := git.NewGitWebhookServiceImpl(sugaredLogger, ciHandlerImpl, gitWebhookRepositoryImpl) gitWebhookRestHandlerImpl := restHandler.NewGitWebhookRestHandlerImpl(sugaredLogger, gitWebhookServiceImpl) - webhookServiceImpl := 
pipeline.NewWebhookServiceImpl(ciArtifactRepositoryImpl, sugaredLogger, ciPipelineRepositoryImpl, appServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciWorkflowRepositoryImpl, workflowDagExecutorImpl, ciHandlerImpl) + webhookServiceImpl := pipeline.NewWebhookServiceImpl(ciArtifactRepositoryImpl, sugaredLogger, ciPipelineRepositoryImpl, appServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciWorkflowRepositoryImpl, customTagServiceImpl, workflowDagExecutorImpl, ciHandlerImpl) ciEventConfig, err := pubsub.GetCiEventConfig() if err != nil { return nil, err From 47729b119fd4abaee182f425a69279912984bf0b Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 09:38:30 +0530 Subject: [PATCH 018/143] Remove unnecessary image path deactivation --- pkg/pipeline/CiHandler.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index aa2ce19619..52f40b323d 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -511,10 +511,6 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int //err == pg.ErrNoRows should never happen return nil, err } - err = impl.customTagService.DeactivateImagePathReservation(w.ImagePathReservationId) - if err != nil { - return nil, err - } wfResponse.CustomTag = &bean2.CustomTagErrorResponse{ TagPattern: customTag.TagPattern, AutoIncreasingNumber: customTag.AutoIncreasingNumber, From ec8510f5934b13e6fb301a85a20a346d8e18849a Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 09:51:58 +0530 Subject: [PATCH 019/143] Fix migration --- scripts/sql/166_custom_image_tag.down.sql | 5 --- scripts/sql/166_custom_image_tag.up.sql | 36 +++++++++++++------ .../167_custom_image_tag_generalize.down.sql | 0 .../167_custom_image_tag_generalize.up.sql | 11 ------ scripts/sql/168_image_path.down.sql | 0 scripts/sql/168_image_path.up.sql | 12 ------- 6 files changed, 26 insertions(+), 38 deletions(-) delete mode 100644 
scripts/sql/167_custom_image_tag_generalize.down.sql delete mode 100644 scripts/sql/167_custom_image_tag_generalize.up.sql delete mode 100644 scripts/sql/168_image_path.down.sql delete mode 100644 scripts/sql/168_image_path.up.sql diff --git a/scripts/sql/166_custom_image_tag.down.sql b/scripts/sql/166_custom_image_tag.down.sql index 38919016af..5ce4650017 100644 --- a/scripts/sql/166_custom_image_tag.down.sql +++ b/scripts/sql/166_custom_image_tag.down.sql @@ -1,6 +1 @@ -ALTER TABLE ci_workflow - DROP COLUMN IF EXISTS target_image_location; - -DROP INDEX IF EXISTS target_image_path; - DROP TABLE IF EXISTS custom_tag; diff --git a/scripts/sql/166_custom_image_tag.up.sql b/scripts/sql/166_custom_image_tag.up.sql index aa099e7734..2f0daf4752 100644 --- a/scripts/sql/166_custom_image_tag.up.sql +++ b/scripts/sql/166_custom_image_tag.up.sql @@ -1,15 +1,31 @@ +CREATE TABLE "public"."custom_tag" +( + id serial PRIMARY KEY, + custom_tag_format text, + auto_increasing_number int DEFAULT 0, + entity_key int, + entity_value text, + metadata jsonb +); -ALTER TABLE ci_workflow - ADD COLUMN IF NOT EXISTS target_image_location text default null; -CREATE INDEX IF NOT EXISTS target_image_path ON ci_workflow (target_image_location); +CREATE INDEX IF NOT EXISTS entity_key_value ON custom_tag (entity_key, entity_value); +ALTER TABLE custom_tag + ADD CONSTRAINT unique_entity_key_entity_value UNIQUE (entity_key, entity_value) -CREATE TABLE "public"."custom_tag" + +CREATE TABLE IF not exists "public"."image_path_reservation" ( - id serial PRIMARY KEY, - ci_pipeline_id int NOT NULL UNIQUE, - custom_tag_format text, - auto_increasing_number int DEFAULT 0, + id serial PRIMARY KEY, + custom_tag_id int, + image_path text, + active boolean default true, + FOREIGN KEY (custom_tag_id) REFERENCES custom_tag (id) +); + +CREATE INDEX IF NOT EXISTS image_path_index ON image_path_reservation (image_path); - FOREIGN KEY (ci_pipeline_id) REFERENCES ci_pipeline (id) -); \ No newline at end of file 
+ALTER TABLE ci_workflow + ADD column IF NOT EXISTS image_path_reservation_id int; +ALTER TABLE ci_workflow + ADD CONSTRAINT fk_image_path_reservation_id FOREIGN KEY (image_path_reservation_id) REFERENCES image_path (id) \ No newline at end of file diff --git a/scripts/sql/167_custom_image_tag_generalize.down.sql b/scripts/sql/167_custom_image_tag_generalize.down.sql deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/scripts/sql/167_custom_image_tag_generalize.up.sql b/scripts/sql/167_custom_image_tag_generalize.up.sql deleted file mode 100644 index 4ba7720556..0000000000 --- a/scripts/sql/167_custom_image_tag_generalize.up.sql +++ /dev/null @@ -1,11 +0,0 @@ -ALTER TABLE custom_tag - ADD COLUMN entity_key int; -ALTER TABLE custom_tag - ADD COLUMN entity_value varchar(100); - -CREATE INDEX IF NOT EXISTS entity_key_value ON custom_tag (entity_key, entity_value); - -ALTER TABLE custom_tag - ADD CONSTRAINT constraint_name UNIQUE (entity_key, entity_value) - - diff --git a/scripts/sql/168_image_path.down.sql b/scripts/sql/168_image_path.down.sql deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/scripts/sql/168_image_path.up.sql b/scripts/sql/168_image_path.up.sql deleted file mode 100644 index 18a6c769dd..0000000000 --- a/scripts/sql/168_image_path.up.sql +++ /dev/null @@ -1,12 +0,0 @@ -CREATE TABLE "public"."image_path_reservation"( - id serial PRIMARY KEY, - custom_tag_id int, - image_path text, - active boolean default true, - FOREIGN KEY (custom_tag_id) REFERENCES custom_tag (id) -); - -CREATE INDEX IF NOT EXISTS image_path_index ON image_path_reservation (image_path); - -ALTER TABLE ci_workflow - ADD column IF NOT EXISTS image_path_reservation_id int; \ No newline at end of file From bf99dce012a6af4fae43afb487f04a934576035d Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 11:16:08 +0530 Subject: [PATCH 020/143] Refactor --- pkg/pipeline/CiService.go | 11 ++++++++++- scripts/sql/166_custom_image_tag.down.sql | 14 
++++++++++++++ scripts/sql/166_custom_image_tag.up.sql | 4 ++-- 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 0f6bd20e38..d752bded73 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -466,7 +466,16 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. if errors.Is(err, pkg.ErrImagePathInUse) { savedWf.Status = pipelineConfig.WorkflowFailed savedWf.Message = pkg.ImageTagUnavailableMessage - impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) + go func() { + err := impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) + if err != nil { + impl.Logger.Errorw("could not save workflow, after failing due to conflicting image tag") + } + err = impl.customTagService.DeactivateImagePathReservation(imagePathReservation.Id) + if err != nil { + impl.Logger.Errorw("could not clear the image path reservation") + } + }() return nil, err } return nil, err diff --git a/scripts/sql/166_custom_image_tag.down.sql b/scripts/sql/166_custom_image_tag.down.sql index 5ce4650017..3033e71504 100644 --- a/scripts/sql/166_custom_image_tag.down.sql +++ b/scripts/sql/166_custom_image_tag.down.sql @@ -1 +1,15 @@ DROP TABLE IF EXISTS custom_tag; + +DROP INDEX IF EXISTS entity_key_value; + +ALTER TABLE custom_tag + DROP CONSTRAINT unique_entity_key_entity_value; + +DROP TABLE IF EXISTS image_path_reservation; + +DROP INDEX IF EXISTS image_path_index; + +ALTER TABLE ci_workflow + DROP column IF EXISTS image_path_reservation_id; +ALTER TABLE ci_workflow + DROP CONSTRAINT fk_image_path_reservation_id; \ No newline at end of file diff --git a/scripts/sql/166_custom_image_tag.up.sql b/scripts/sql/166_custom_image_tag.up.sql index 2f0daf4752..931e9b92da 100644 --- a/scripts/sql/166_custom_image_tag.up.sql +++ b/scripts/sql/166_custom_image_tag.up.sql @@ -11,7 +11,7 @@ CREATE TABLE "public"."custom_tag" CREATE INDEX IF NOT EXISTS entity_key_value ON custom_tag (entity_key, 
entity_value); ALTER TABLE custom_tag - ADD CONSTRAINT unique_entity_key_entity_value UNIQUE (entity_key, entity_value) + ADD CONSTRAINT unique_entity_key_entity_value UNIQUE (entity_key, entity_value); CREATE TABLE IF not exists "public"."image_path_reservation" @@ -28,4 +28,4 @@ CREATE INDEX IF NOT EXISTS image_path_index ON image_path_reservation (image_pat ALTER TABLE ci_workflow ADD column IF NOT EXISTS image_path_reservation_id int; ALTER TABLE ci_workflow - ADD CONSTRAINT fk_image_path_reservation_id FOREIGN KEY (image_path_reservation_id) REFERENCES image_path (id) \ No newline at end of file + ADD CONSTRAINT fk_image_path_reservation_id FOREIGN KEY (image_path_reservation_id) REFERENCES image_path (id); \ No newline at end of file From 720a99d6273b3dad555398b29920cd837726f727 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 12:23:31 +0530 Subject: [PATCH 021/143] Refactor --- internal/sql/repository/CustomTagRepository.go | 17 ++++++++++++++--- .../appWorkflow/AppWorkflowRepository.go | 1 - pkg/CustomTagService.go | 7 +++++++ pkg/pipeline/CiHandler.go | 11 ++++++++++- pkg/pipeline/CiService.go | 11 ++++++----- pkg/pipeline/PipelineBuilder.go | 2 +- scripts/sql/166_custom_image_tag.up.sql | 3 ++- wire_gen.go | 2 +- 8 files changed, 41 insertions(+), 13 deletions(-) diff --git a/internal/sql/repository/CustomTagRepository.go b/internal/sql/repository/CustomTagRepository.go index f582b05c15..ab0065ec18 100644 --- a/internal/sql/repository/CustomTagRepository.go +++ b/internal/sql/repository/CustomTagRepository.go @@ -12,6 +12,7 @@ type CustomTag struct { EntityValue string `sql:"entity_value"` TagPattern string `sql:"tag_pattern"` AutoIncreasingNumber int `sql:"auto_increasing_number"` + Active bool `sql:"active"` Metadata string `sql:"metadata"` } @@ -33,6 +34,7 @@ type ImageTagRepository interface { UpdateImageTag(customTag *CustomTag) error DeleteByEntityKeyAndValue(entityKey int, entityValue string) error 
DeactivateImagePathReservation(id int) error + FetchActiveCustomTagData(entityKey int, entityValue string) (*CustomTag, error) } type ImageTagRepositoryImpl struct { @@ -57,7 +59,7 @@ func (impl *ImageTagRepositoryImpl) UpdateImageTag(customTag *CustomTag) error { } func (impl *ImageTagRepositoryImpl) DeleteByEntityKeyAndValue(entityKey int, entityValue string) error { - query := `delete from table custom_tag where entity_key = ? and entity_value = ?` + query := `update custom_tag set active = false where entity_key = ? and entity_value = ?` _, err := impl.dbConnection.Exec(query, entityKey, entityValue) return err } @@ -76,10 +78,19 @@ func (impl *ImageTagRepositoryImpl) FetchCustomTagData(entityType int, entityVal return &customTagData, err } +func (impl *ImageTagRepositoryImpl) FetchActiveCustomTagData(entityType int, entityValue string) (*CustomTag, error) { + var customTagData CustomTag + err := impl.dbConnection.Model(&customTagData). + Where("entity_key = ?", entityType). + Where("entity_value = ?", entityValue). + Where("active = ?", true).Select() + return &customTagData, err +} + func (impl *ImageTagRepositoryImpl) IncrementAndFetchByEntityKeyAndValue(tx *pg.Tx, entityKey int, entityValue string) (*CustomTag, error) { var customTag CustomTag - query := `update custom_tag set auto_increasing_number=auto_increasing_number+1 where entity_key=? and entity_value=? returning id, tag_pattern, auto_increasing_number, entity_key, entity_value` - _, err := tx.Query(&customTag, query, entityKey, entityValue) + query := `update custom_tag set auto_increasing_number=auto_increasing_number+1 where entity_key=? and entity_value=? and active = ? 
returning id, tag_pattern, auto_increasing_number, entity_key, entity_value` + _, err := tx.Query(&customTag, query, entityKey, entityValue, true) return &customTag, err } diff --git a/internal/sql/repository/appWorkflow/AppWorkflowRepository.go b/internal/sql/repository/appWorkflow/AppWorkflowRepository.go index bebb2540e1..ab9420aa9d 100644 --- a/internal/sql/repository/appWorkflow/AppWorkflowRepository.go +++ b/internal/sql/repository/appWorkflow/AppWorkflowRepository.go @@ -263,7 +263,6 @@ func (impl AppWorkflowRepositoryImpl) FindWFAllMappingByWorkflowId(workflowId in func (impl AppWorkflowRepositoryImpl) FindWFCIMappingByCIPipelineId(ciPipelineId int) ([]*AppWorkflowMapping, error) { var appWorkflowsMapping []*AppWorkflowMapping - err := impl.dbConnection.Model(&appWorkflowsMapping). Where("component_id = ?", ciPipelineId). Where("type = ?", CIPIPELINE). diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 7395c37d1c..4de4ea864f 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -27,6 +27,7 @@ var ( type CustomTagService interface { CreateOrUpdateCustomTag(tag *bean.CustomTag) error GetCustomTagByEntityKeyAndValue(entityKey int, entityValue string) (*repository.CustomTag, error) + GetActiveCustomTagByEntityKeyAndValue(entityKey int, entityValue string) (*repository.CustomTag, error) GenerateImagePath(entityKey int, entityValue string, dockerRegistryURL string, dockerRepo string) (*repository.ImagePathReservation, error) DeleteCustomTagIfExists(tag bean.CustomTag) error DeactivateImagePathReservation(id int) error @@ -55,6 +56,7 @@ func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag *bean.CustomTag) e TagPattern: strings.ReplaceAll(tag.TagPattern, "{X}", "{x}"), AutoIncreasingNumber: tag.AutoIncreasingNumber, Metadata: tag.Metadata, + Active: true, } oldTagObject, err := impl.customTagRepository.FetchCustomTagData(customTagData.EntityKey, customTagData.EntityValue) if err != nil && err != pg.ErrNoRows { @@ -64,6 
+66,7 @@ func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag *bean.CustomTag) e return impl.customTagRepository.CreateImageTag(&customTagData) } else { customTagData.Id = oldTagObject.Id + customTagData.Active = true return impl.customTagRepository.UpdateImageTag(&customTagData) } } @@ -76,6 +79,10 @@ func (impl *CustomTagServiceImpl) GetCustomTagByEntityKeyAndValue(entityKey int, return impl.customTagRepository.FetchCustomTagData(entityKey, entityValue) } +func (impl *CustomTagServiceImpl) GetActiveCustomTagByEntityKeyAndValue(entityKey int, entityValue string) (*repository.CustomTag, error) { + return impl.customTagRepository.FetchActiveCustomTagData(entityKey, entityValue) +} + func (impl *CustomTagServiceImpl) GenerateImagePath(entityKey int, entityValue string, dockerRegistryURL string, dockerRepo string) (*repository.ImagePathReservation, error) { connection := impl.customTagRepository.GetConnection() tx, err := connection.Begin() diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index 52f40b323d..82db5b03fa 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -27,6 +27,7 @@ import ( blob_storage "github.com/devtron-labs/common-lib/blob-storage" bean2 "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/client/gitSensor" + "github.com/devtron-labs/devtron/internal/sql/repository/appWorkflow" repository2 "github.com/devtron-labs/devtron/internal/sql/repository/imageTagging" "github.com/devtron-labs/devtron/pkg" appGroup2 "github.com/devtron-labs/devtron/pkg/appGroup" @@ -106,9 +107,10 @@ type CiHandlerImpl struct { envRepository repository3.EnvironmentRepository imageTaggingService ImageTaggingService customTagService pkg.CustomTagService + appWorkflowRepository appWorkflow.AppWorkflowRepository } -func NewCiHandlerImpl(Logger *zap.SugaredLogger, ciService CiService, 
ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, gitSensorClient gitSensor.Client, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, workflowService WorkflowService, ciLogService CiLogService, ciConfig *CiConfig, ciArtifactRepository repository.CiArtifactRepository, userService user.UserService, eventClient client.EventClient, eventFactory client.EventFactory, ciPipelineRepository pipelineConfig.CiPipelineRepository, appListingRepository repository.AppListingRepository, K8sUtil *k8s.K8sUtil, cdPipelineRepository pipelineConfig.PipelineRepository, enforcerUtil rbac.EnforcerUtil, appGroupService appGroup2.AppGroupService, envRepository repository3.EnvironmentRepository, imageTaggingService ImageTaggingService, customTagService pkg.CustomTagService) *CiHandlerImpl { +func NewCiHandlerImpl(Logger *zap.SugaredLogger, ciService CiService, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, gitSensorClient gitSensor.Client, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, workflowService WorkflowService, ciLogService CiLogService, ciConfig *CiConfig, ciArtifactRepository repository.CiArtifactRepository, userService user.UserService, eventClient client.EventClient, eventFactory client.EventFactory, ciPipelineRepository pipelineConfig.CiPipelineRepository, appListingRepository repository.AppListingRepository, K8sUtil *k8s.K8sUtil, cdPipelineRepository pipelineConfig.PipelineRepository, enforcerUtil rbac.EnforcerUtil, appGroupService appGroup2.AppGroupService, envRepository repository3.EnvironmentRepository, imageTaggingService ImageTaggingService, customTagService pkg.CustomTagService, appWorkflowRepository appWorkflow.AppWorkflowRepository) *CiHandlerImpl { return &CiHandlerImpl{ Logger: Logger, ciService: ciService, @@ -131,6 +133,7 @@ func NewCiHandlerImpl(Logger *zap.SugaredLogger, ciService CiService, ciPipeline envRepository: envRepository, imageTaggingService: imageTaggingService, customTagService: 
customTagService, + appWorkflowRepository: appWorkflowRepository, } } @@ -160,6 +163,7 @@ type WorkflowResponse struct { EnvironmentName string `json:"environmentName"` ImageReleaseTags []*repository2.ImageTag `json:"imageReleaseTags"` ImageComment *repository2.ImageComment `json:"imageComment"` + AppWorkflowId int `json:"appWorkflowId"` CustomTag *bean2.CustomTagErrorResponse `json:"customTag,omitempty"` } @@ -511,6 +515,11 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int //err == pg.ErrNoRows should never happen return nil, err } + appWorkflows, err := impl.appWorkflowRepository.FindWFCIMappingByCIPipelineId(w.CiPipelineId) + if err != nil && err != pg.ErrNoRows { + return nil, err + } + wfResponse.AppWorkflowId = appWorkflows[0].AppWorkflowId //it is guaranteed there will always be 1 entry (in case of ci_pipeline_id) wfResponse.CustomTag = &bean2.CustomTagErrorResponse{ TagPattern: customTag.TagPattern, AutoIncreasingNumber: customTag.AutoIncreasingNumber, diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index d752bded73..2a0f7cd5f7 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -456,7 +456,7 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. } var dockerImageTag string - customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) if err != nil && err != pg.ErrNoRows { return nil, err } @@ -466,11 +466,12 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
if errors.Is(err, pkg.ErrImagePathInUse) { savedWf.Status = pipelineConfig.WorkflowFailed savedWf.Message = pkg.ImageTagUnavailableMessage + err = impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) + if err != nil { + impl.Logger.Errorw("could not save workflow, after failing due to conflicting image tag") + return nil, err + } go func() { - err := impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) - if err != nil { - impl.Logger.Errorw("could not save workflow, after failing due to conflicting image tag") - } err = impl.customTagService.DeactivateImagePathReservation(imagePathReservation.Id) if err != nil { impl.Logger.Errorw("could not clear the image path reservation") diff --git a/pkg/pipeline/PipelineBuilder.go b/pkg/pipeline/PipelineBuilder.go index 200bec80ab..9968c67cf9 100644 --- a/pkg/pipeline/PipelineBuilder.go +++ b/pkg/pipeline/PipelineBuilder.go @@ -4155,7 +4155,7 @@ func (impl *PipelineBuilderImpl) GetCiPipelineById(pipelineId int) (ciPipeline * ScanEnabled: pipeline.ScanEnabled, IsDockerConfigOverridden: pipeline.IsDockerConfigOverridden, } - customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) if err != nil && err != pg.ErrNoRows { return nil, err } diff --git a/scripts/sql/166_custom_image_tag.up.sql b/scripts/sql/166_custom_image_tag.up.sql index 931e9b92da..f3b7dc748b 100644 --- a/scripts/sql/166_custom_image_tag.up.sql +++ b/scripts/sql/166_custom_image_tag.up.sql @@ -2,9 +2,10 @@ CREATE TABLE "public"."custom_tag" ( id serial PRIMARY KEY, custom_tag_format text, - auto_increasing_number int DEFAULT 0, + auto_increasing_number int DEFAULT 0, entity_key int, entity_value text, + active boolean DEFAULT true, metadata jsonb ); diff --git a/wire_gen.go b/wire_gen.go index b4f16f5c63..1189a0d3f5 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ 
-461,7 +461,7 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - ciHandlerImpl := pipeline.NewCiHandlerImpl(sugaredLogger, ciServiceImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciWorkflowRepositoryImpl, workflowServiceImpl, ciLogServiceImpl, ciConfig, ciArtifactRepositoryImpl, userServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciPipelineRepositoryImpl, appListingRepositoryImpl, k8sUtil, pipelineRepositoryImpl, enforcerUtilImpl, appGroupServiceImpl, environmentRepositoryImpl, imageTaggingServiceImpl, customTagServiceImpl) + ciHandlerImpl := pipeline.NewCiHandlerImpl(sugaredLogger, ciServiceImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciWorkflowRepositoryImpl, workflowServiceImpl, ciLogServiceImpl, ciConfig, ciArtifactRepositoryImpl, userServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciPipelineRepositoryImpl, appListingRepositoryImpl, k8sUtil, pipelineRepositoryImpl, enforcerUtilImpl, appGroupServiceImpl, environmentRepositoryImpl, imageTaggingServiceImpl, customTagServiceImpl, appWorkflowRepositoryImpl) gitRegistryConfigImpl := pipeline.NewGitRegistryConfigImpl(sugaredLogger, gitProviderRepositoryImpl, clientImpl) ociRegistryConfigRepositoryImpl := repository5.NewOCIRegistryConfigRepositoryImpl(db) dockerRegistryConfigImpl := pipeline.NewDockerRegistryConfigImpl(sugaredLogger, dockerArtifactStoreRepositoryImpl, dockerRegistryIpsConfigRepositoryImpl, ociRegistryConfigRepositoryImpl) From b841f29ba94d12b221674caee0498d3019392a93 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 12:44:06 +0530 Subject: [PATCH 022/143] Add validations --- pkg/CustomTagService.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 4de4ea864f..eab18f0aba 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -6,6 +6,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository" 
"github.com/go-pg/pg" "go.uber.org/zap" + "regexp" "strconv" "strings" ) @@ -126,6 +127,9 @@ func validateAndConstructTag(customTagData *repository.CustomTag) (string, error if err != nil { return "", err } + if customTagData.AutoIncreasingNumber < 0 { + return "", fmt.Errorf("counter {x} can not be negative") + } dockerImageTag := strings.ReplaceAll(customTagData.TagPattern, "{x}", strconv.Itoa(customTagData.AutoIncreasingNumber-1)) //-1 because number is already incremented, current value will be used next time err = validateTag(dockerImageTag) if err != nil { @@ -143,6 +147,15 @@ func validateTagPattern(customTagPattern string) error { if totalX != 1 { return fmt.Errorf("variable {x} is allowed exactly once") } + regex := "^([A-Za-z0-9{x}{X}]?)[A-Za-z0-9.-{x}{X}]*[A-Za-z0-9_{x}{X}]$" + matched, err := regexp.MatchString(regex, customTagPattern) + if err != nil { + fmt.Println("Error:", err) + return fmt.Errorf("could not match the pattern against allowed regex") + } + if !matched { + return fmt.Errorf("allowed: Alphanumeric characters, including (_) (.) (-) but cannot begin or end with (.) 
or (-)") + } return nil } From c955e21cf1a671001ced32e077aa419041214328 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 14:36:11 +0530 Subject: [PATCH 023/143] Remove regex check --- pkg/CustomTagService.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index eab18f0aba..c44fa45c0b 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -6,7 +6,6 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/go-pg/pg" "go.uber.org/zap" - "regexp" "strconv" "strings" ) @@ -147,15 +146,6 @@ func validateTagPattern(customTagPattern string) error { if totalX != 1 { return fmt.Errorf("variable {x} is allowed exactly once") } - regex := "^([A-Za-z0-9{x}{X}]?)[A-Za-z0-9.-{x}{X}]*[A-Za-z0-9_{x}{X}]$" - matched, err := regexp.MatchString(regex, customTagPattern) - if err != nil { - fmt.Println("Error:", err) - return fmt.Errorf("could not match the pattern against allowed regex") - } - if !matched { - return fmt.Errorf("allowed: Alphanumeric characters, including (_) (.) (-) but cannot begin or end with (.) 
or (-)") - } return nil } From 3ea5834245da2de0366a20daddce737d95d63270 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 16:08:03 +0530 Subject: [PATCH 024/143] Fix error message --- pkg/CustomTagService.go | 2 +- pkg/pipeline/CiService.go | 11 ++--------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index c44fa45c0b..054e3453f4 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -21,7 +21,7 @@ const ( ) var ( - ErrImagePathInUse = fmt.Errorf("image path is already being used by someone") + ErrImagePathInUse = fmt.Errorf(ImageTagUnavailableMessage) ) type CustomTagService interface { diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 2a0f7cd5f7..b947a88c13 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -466,17 +466,10 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. if errors.Is(err, pkg.ErrImagePathInUse) { savedWf.Status = pipelineConfig.WorkflowFailed savedWf.Message = pkg.ImageTagUnavailableMessage - err = impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) - if err != nil { + err1 := impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) + if err1 != nil { impl.Logger.Errorw("could not save workflow, after failing due to conflicting image tag") - return nil, err } - go func() { - err = impl.customTagService.DeactivateImagePathReservation(imagePathReservation.Id) - if err != nil { - impl.Logger.Errorw("could not clear the image path reservation") - } - }() return nil, err } return nil, err From 8fde21d8d9c572ef358326751fd906f5df680bc3 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Tue, 29 Aug 2023 16:31:33 +0530 Subject: [PATCH 025/143] Return bad request in case of trigger --- api/restHandler/app/BuildPipelineRestHandler.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/api/restHandler/app/BuildPipelineRestHandler.go b/api/restHandler/app/BuildPipelineRestHandler.go 
index 338fd36024..672e1f1cf5 100644 --- a/api/restHandler/app/BuildPipelineRestHandler.go +++ b/api/restHandler/app/BuildPipelineRestHandler.go @@ -13,6 +13,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" + "github.com/devtron-labs/devtron/pkg" appGroup2 "github.com/devtron-labs/devtron/pkg/appGroup" "github.com/devtron-labs/devtron/pkg/bean" "github.com/devtron-labs/devtron/pkg/pipeline" @@ -550,6 +551,11 @@ func (handler PipelineConfigRestHandlerImpl) TriggerCiPipeline(w http.ResponseWr //RBAC ENDS response := make(map[string]string) resp, err := handler.ciHandler.HandleCIManual(ciTriggerRequest) + if errors.Is(err, pkg.ErrImagePathInUse) { + handler.Logger.Errorw("service err duplicate image tag, TriggerCiPipeline", "err", err, "payload", ciTriggerRequest) + common.WriteJsonResp(w, err, response, http.StatusBadRequest) + return + } if err != nil { handler.Logger.Errorw("service err, TriggerCiPipeline", "err", err, "payload", ciTriggerRequest) common.WriteJsonResp(w, err, response, http.StatusInternalServerError) From 0f3c24887d0613bb5cd457ce2be3beddf9632f25 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Wed, 30 Aug 2023 09:54:02 +0530 Subject: [PATCH 026/143] add validation --- api/restHandler/app/BuildPipelineRestHandler.go | 2 +- pkg/CustomTagService.go | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/api/restHandler/app/BuildPipelineRestHandler.go b/api/restHandler/app/BuildPipelineRestHandler.go index 672e1f1cf5..0828c45e6d 100644 --- a/api/restHandler/app/BuildPipelineRestHandler.go +++ b/api/restHandler/app/BuildPipelineRestHandler.go @@ -553,7 +553,7 @@ func (handler PipelineConfigRestHandlerImpl) TriggerCiPipeline(w http.ResponseWr resp, err := 
handler.ciHandler.HandleCIManual(ciTriggerRequest) if errors.Is(err, pkg.ErrImagePathInUse) { handler.Logger.Errorw("service err duplicate image tag, TriggerCiPipeline", "err", err, "payload", ciTriggerRequest) - common.WriteJsonResp(w, err, response, http.StatusBadRequest) + common.WriteJsonResp(w, err, response, http.StatusConflict) return } if err != nil { diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 054e3453f4..0f222618c1 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -50,6 +50,9 @@ func (impl *CustomTagServiceImpl) DeactivateImagePathReservation(id int) error { } func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag *bean.CustomTag) error { + if err := validateTagPattern(tag.TagPattern); err != nil { + return err + } customTagData := repository.CustomTag{ EntityKey: tag.EntityKey, EntityValue: tag.EntityValue, @@ -138,6 +141,9 @@ func validateAndConstructTag(customTagData *repository.CustomTag) (string, error } func validateTagPattern(customTagPattern string) error { + if len(customTagPattern) == 0 { + return fmt.Errorf("tag length can not be zero") + } allowedVariables := []string{"{x}", "{X}"} totalX := 0 for _, variable := range allowedVariables { @@ -146,6 +152,16 @@ func validateTagPattern(customTagPattern string) error { if totalX != 1 { return fmt.Errorf("variable {x} is allowed exactly once") } + remainingString := strings.ReplaceAll(customTagPattern, "{x}", "") + remainingString = strings.ReplaceAll(remainingString, "{X}", "") + + if len(remainingString) == 0 { + return nil + } + n := len(remainingString) + if remainingString[0] == '.' || remainingString[n-1] == '.' 
|| remainingString[0] == '-' || remainingString[n-1] == '-' { + return fmt.Errorf("tag can not start or end with an hyphen or a period") + } return nil } From 06edac9a3c74e77f963cd690d9bef56d82ee1421 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Wed, 30 Aug 2023 09:59:00 +0530 Subject: [PATCH 027/143] add validation --- scripts/sql/166_custom_image_tag.up.sql | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/sql/166_custom_image_tag.up.sql b/scripts/sql/166_custom_image_tag.up.sql index f3b7dc748b..d4b31fb677 100644 --- a/scripts/sql/166_custom_image_tag.up.sql +++ b/scripts/sql/166_custom_image_tag.up.sql @@ -14,7 +14,6 @@ CREATE INDEX IF NOT EXISTS entity_key_value ON custom_tag (entity_key, entity_va ALTER TABLE custom_tag ADD CONSTRAINT unique_entity_key_entity_value UNIQUE (entity_key, entity_value); - CREATE TABLE IF not exists "public"."image_path_reservation" ( id serial PRIMARY KEY, From df484ca27fb70006c5eb71a14a367ba32bf11912 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Wed, 30 Aug 2023 16:28:38 +0530 Subject: [PATCH 028/143] ad ci workflow id in status api --- internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go | 1 + pkg/pipeline/CiHandler.go | 1 + 2 files changed, 2 insertions(+) diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 1e83fba28d..410cc82e9e 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -230,6 +230,7 @@ type CiWorkflowStatus struct { CiPipelineName string `json:"ciPipelineName,omitempty"` CiStatus string `json:"ciStatus"` StorageConfigured bool `json:"storageConfigured"` + CiWorkflowId int `json:"ciWorkflowId,omitempty"` } type AppDeploymentStatus struct { diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index 82db5b03fa..c7e084b009 100644 --- a/pkg/pipeline/CiHandler.go +++ 
b/pkg/pipeline/CiHandler.go @@ -1605,6 +1605,7 @@ func (impl *CiHandlerImpl) FetchCiStatusForTriggerViewForEnvironment(request app ciWorkflowStatus.CiPipelineName = ciWorkflow.CiPipeline.Name ciWorkflowStatus.CiStatus = ciWorkflow.Status ciWorkflowStatus.StorageConfigured = ciWorkflow.BlobStorageEnabled + ciWorkflowStatus.CiWorkflowId = ciWorkflow.Id ciWorkflowStatuses = append(ciWorkflowStatuses, ciWorkflowStatus) notTriggeredWorkflows[ciWorkflowStatus.CiPipelineId] = true } From 786121bab2f6c1dc5e741747c55ced5e5d14d760 Mon Sep 17 00:00:00 2001 From: Avdhesh Kumar Date: Thu, 31 Aug 2023 17:02:13 +0530 Subject: [PATCH 029/143] add comments --- pkg/pipeline/CiCdPipelineOrchestrator.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index 225014c218..343c5dec20 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -259,6 +259,8 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. 
AuditLog: sql.AuditLog{UpdatedBy: userId, UpdatedOn: time.Now()}, } + //If customTagObject has been passed, create or update the resource + //Otherwise deleteIfExists if createRequest.CustomTagObject != nil { customTag := bean4.CustomTag{ EntityKey: pkg.EntityTypeCiPipelineId, @@ -693,6 +695,7 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCiConf(createRequest *bean.CiConf return nil, err } + //If customTagObejct has been passed, save it if ciPipeline.CustomTagObject != nil { customTag := &bean4.CustomTag{ EntityKey: pkg.EntityTypeCiPipelineId, From 1febd7d3cca0cf333c202f6637ccb6e107e14f8b Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 4 Oct 2023 18:43:45 +0530 Subject: [PATCH 030/143] custom image tag migration --- ...66_custom_image_tag.down.sql => 177_custom_image_tag.down.sql} | 0 .../{166_custom_image_tag.up.sql => 177_custom_image_tag.up.sql} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename scripts/sql/{166_custom_image_tag.down.sql => 177_custom_image_tag.down.sql} (100%) rename scripts/sql/{166_custom_image_tag.up.sql => 177_custom_image_tag.up.sql} (100%) diff --git a/scripts/sql/166_custom_image_tag.down.sql b/scripts/sql/177_custom_image_tag.down.sql similarity index 100% rename from scripts/sql/166_custom_image_tag.down.sql rename to scripts/sql/177_custom_image_tag.down.sql diff --git a/scripts/sql/166_custom_image_tag.up.sql b/scripts/sql/177_custom_image_tag.up.sql similarity index 100% rename from scripts/sql/166_custom_image_tag.up.sql rename to scripts/sql/177_custom_image_tag.up.sql From ec1dd5346cd8fbdec2624f2e1150da362f3e6f02 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 4 Oct 2023 18:46:16 +0530 Subject: [PATCH 031/143] sql script correction --- scripts/sql/177_custom_image_tag.up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sql/177_custom_image_tag.up.sql b/scripts/sql/177_custom_image_tag.up.sql index d4b31fb677..358c741839 100644 --- 
a/scripts/sql/177_custom_image_tag.up.sql +++ b/scripts/sql/177_custom_image_tag.up.sql @@ -28,4 +28,4 @@ CREATE INDEX IF NOT EXISTS image_path_index ON image_path_reservation (image_pat ALTER TABLE ci_workflow ADD column IF NOT EXISTS image_path_reservation_id int; ALTER TABLE ci_workflow - ADD CONSTRAINT fk_image_path_reservation_id FOREIGN KEY (image_path_reservation_id) REFERENCES image_path (id); \ No newline at end of file + ADD CONSTRAINT fk_image_path_reservation_id FOREIGN KEY (image_path_reservation_id) REFERENCES image_path_reservation (id); \ No newline at end of file From 8f3a54b5a9fdab4571ff8bc56fa9dd0e32d591df Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 5 Oct 2023 15:56:53 +0530 Subject: [PATCH 032/143] fix: validation and sql script --- pkg/CustomTagService.go | 43 +++++++++++++++++-------- scripts/sql/177_custom_image_tag.up.sql | 1 + 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 0f222618c1..7fe7f2c962 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -6,6 +6,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/go-pg/pg" "go.uber.org/zap" + "regexp" "strconv" "strings" ) @@ -16,8 +17,10 @@ const ( ) const ( - imagePathPattern = "%s/%s:%s" // dockerReg/dockerRepo:Tag - ImageTagUnavailableMessage = "Desired image tag already exists" + imagePathPattern = "%s/%s:%s" // dockerReg/dockerRepo:Tag + ImageTagUnavailableMessage = "Desired image tag already exists" + REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS = `\{.{2,}\}` + REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x = `\{[^xX]|{}\}` ) var ( @@ -144,27 +147,41 @@ func validateTagPattern(customTagPattern string) error { if len(customTagPattern) == 0 { return fmt.Errorf("tag length can not be zero") } - allowedVariables := []string{"{x}", "{X}"} - totalX := 0 - for _, variable := range allowedVariables { - totalX += 
strings.Count(customTagPattern, variable) - } - if totalX != 1 { - return fmt.Errorf("variable {x} is allowed exactly once") + + if IsInvalidVariableFormat(customTagPattern) { + return fmt.Errorf("only one variable is allowed. Allowed variable format : {x} or {X}") } - remainingString := strings.ReplaceAll(customTagPattern, "{x}", "") - remainingString = strings.ReplaceAll(remainingString, "{X}", "") + remainingString := strings.ReplaceAll(customTagPattern, ".{x}", "") + remainingString = strings.ReplaceAll(remainingString, ".{X}", "") if len(remainingString) == 0 { return nil } + n := len(remainingString) - if remainingString[0] == '.' || remainingString[n-1] == '.' || remainingString[0] == '-' || remainingString[n-1] == '-' { - return fmt.Errorf("tag can not start or end with an hyphen or a period") + if remainingString[0] == '.' || remainingString[0] == '-' { + return fmt.Errorf("tag can not start with an hyphen or a period") + } + if n != 0 && (remainingString[n-1] == '.' || remainingString[n-1] == '-') { + return fmt.Errorf("tag can not end with an hyphen or a period") } return nil } +func IsInvalidVariableFormat(customTagPattern string) bool { + regex := regexp.MustCompile(REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS) + matches := regex.FindAllString(customTagPattern, -1) + if len(matches) > 0 { + return true + } + regex = regexp.MustCompile(REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x) + matches = regex.FindAllString(customTagPattern, -1) + if len(matches) > 0 { + return true + } + return false +} + func validateTag(imageTag string) error { if len(imageTag) == 0 || len(imageTag) > 128 { return fmt.Errorf("image tag should be of len 1-128 only, imageTag: %s", imageTag) diff --git a/scripts/sql/177_custom_image_tag.up.sql b/scripts/sql/177_custom_image_tag.up.sql index 358c741839..d6388c6ce3 100644 --- a/scripts/sql/177_custom_image_tag.up.sql +++ b/scripts/sql/177_custom_image_tag.up.sql @@ -2,6 +2,7 @@ CREATE TABLE "public"."custom_tag" ( 
id serial PRIMARY KEY, custom_tag_format text, + tag_pattern text, auto_increasing_number int DEFAULT 0, entity_key int, entity_value text, From 33ae16173623c461ebbc81043fbcd59c1e13cbf9 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 5 Oct 2023 19:41:27 +0530 Subject: [PATCH 033/143] fixing null value in custom tag --- internal/sql/repository/CustomTagRepository.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/sql/repository/CustomTagRepository.go b/internal/sql/repository/CustomTagRepository.go index ab0065ec18..b075491db0 100644 --- a/internal/sql/repository/CustomTagRepository.go +++ b/internal/sql/repository/CustomTagRepository.go @@ -11,7 +11,7 @@ type CustomTag struct { EntityKey int `sql:"entity_key"` EntityValue string `sql:"entity_value"` TagPattern string `sql:"tag_pattern"` - AutoIncreasingNumber int `sql:"auto_increasing_number"` + AutoIncreasingNumber int `sql:"auto_increasing_number, notnull"` Active bool `sql:"active"` Metadata string `sql:"metadata"` } From 74f1c38db32e776d35145a8354432733e38b4bf5 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 11 Oct 2023 18:15:11 +0530 Subject: [PATCH 034/143] marking image tag unreserved on abort --- pkg/pipeline/CiHandler.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index 6f17d5943d..dd2f4e9fb7 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -601,6 +601,12 @@ func (impl *CiHandlerImpl) CancelBuild(workflowId int) (int, error) { impl.Logger.Errorw("cannot update deleted workflow status, but wf deleted", "err", err) return 0, err } + imagePathReservationId := workflow.ImagePathReservationId + err = impl.customTagService.DeactivateImagePathReservation(imagePathReservationId) + if err != nil { + impl.Logger.Errorw("error in marking image tag unreserved", "err", err) + return 0, err + } return workflow.Id, nil } From a0c5a10794fcb085baa9be79f454e0d2f8276f51 Mon Sep 17 00:00:00 
2001 From: Gireesh Naidu Date: Thu, 19 Oct 2023 13:59:37 +0530 Subject: [PATCH 035/143] wip --- api/bean/ValuesOverrideRequest.go | 17 ++ .../sql/repository/CiArtifactRepository.go | 52 +++++- pkg/pipeline/AppArtifactManager.go | 175 ++++++++++++++++++ 3 files changed, 243 insertions(+), 1 deletion(-) diff --git a/api/bean/ValuesOverrideRequest.go b/api/bean/ValuesOverrideRequest.go index 5dc7b24682..7896fa396b 100644 --- a/api/bean/ValuesOverrideRequest.go +++ b/api/bean/ValuesOverrideRequest.go @@ -94,3 +94,20 @@ type TriggerEvent struct { TriggeredBy int32 TriggerdAt time.Time } + +type ArtifactsListFilterOptions struct { + //list filter data + Limit int + Offset int + SearchString int + Order string + + //self stage data + PipelineId int + StageType WorkflowType + + //parent satge data + ParentCdId int + ParentId int + ParentStageType WorkflowType +} diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 7c82b6248b..3981b63c6a 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -59,7 +59,7 @@ type CiArtifactRepository interface { GetArtifactParentCiAndWorkflowDetailsByIds(ids []int) ([]*CiArtifact, error) GetByWfId(wfId int) (artifact *CiArtifact, err error) GetArtifactsByCDPipeline(cdPipelineId, limit int, parentId int, parentType bean.WorkflowType) ([]*CiArtifact, error) - + GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, error) GetLatestArtifactTimeByCiPipelineIds(ciPipelineIds []int) ([]*CiArtifact, error) GetLatestArtifactTimeByCiPipelineId(ciPipelineId int) (*CiArtifact, error) GetArtifactsByCDPipelineV2(cdPipelineId int) ([]CiArtifact, error) @@ -240,6 +240,56 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipeline(cdPipelineId, limi return artifactsAll, err } +func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpts 
*bean.ArtifactsListFilterOptions) ([]*CiArtifact, error) { + artifacts := make([]*CiArtifact, 0, listingFilterOpts.Limit) + if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE { + query := " SELECT cia.* " + + " FROM ci_artifact cia" + + " INNER JOIN ci_pipeline cp ON cp.id=cia.pipeline_id" + + " INNER JOIN pipeline p ON p.ci_pipeline_id = cp.id and p.id=?" + + " WHERE cia.image LIKE %?%" + + " ORDER BY cia.id DESC" + + " LIMIT ?" + + " OFFSET ?;" + + _, err := impl.dbConnection.Query(&artifacts, query) + if err != nil { + return artifacts, err + } + + } else if listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { + query := " SELECT cia.* " + + " FROM ci_artifact cia " + + " WHERE cia.external_ci_pipeline_id = webhook_id " + + " AND cia.image LIKE %?% " + + " ORDER BY cia.id DESC " + + " LIMIT ? " + + " OFFSET ?;" + _, err := impl.dbConnection.Query(&artifacts, query) + if err != nil { + return artifacts, err + } + } else { + return artifacts, nil + } + + //processing + artifactsIds := make([]int, 0, len(artifacts)) + for _, artifact := range artifacts { + artifactsIds = append(artifactsIds, artifact.Id) + } + + //(this will fetch all the artifacts that were deployed on the given pipeline atleast once in new->old deployed order) + + query := " SELECT cia.id,pco.created_on AS created_on " + + " FROM ci_artifact cia" + + " INNER JOIN pipeline_config_override pco ON pco.ci_artifact_id=cia.id" + + " WHERE pco.pipeline_id = ? 
" + + " AND cia.id IN () " + + " ORDER BY pco.id desc;" + +} + func (impl CiArtifactRepositoryImpl) GetLatestArtifactTimeByCiPipelineIds(ciPipelineIds []int) ([]*CiArtifact, error) { artifacts := make([]*CiArtifact, 0) query := "select cws.pipeline_id, cws.created_on from " + diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 6d84e63626..8a5b5914e2 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -184,6 +184,31 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParent(cdPipelineId int, return ciArtifacts, nil } +func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]bean2.CiArtifactBean, error) { + artifacts, err := impl.ciArtifactRepository.GetArtifactsByCDPipelineV3(listingFilterOpts) + if err != nil { + impl.logger.Errorw("error in getting artifacts for ci", "err", err) + return ciArtifacts, err + } + for _, artifact := range artifacts { + if _, ok := artifactMap[artifact.Id]; !ok { + mInfo, err := parseMaterialInfo([]byte(artifact.MaterialInfo), artifact.DataSource) + if err != nil { + mInfo = []byte("[]") + impl.logger.Errorw("Error in parsing artifact material info", "err", err, "artifact", artifact) + } + ciArtifacts = append(ciArtifacts, bean2.CiArtifactBean{ + Id: artifact.Id, + Image: artifact.Image, + ImageDigest: artifact.ImageDigest, + MaterialInfo: mInfo, + ScanEnabled: artifact.ScanEnabled, + Scanned: artifact.Scanned, + }) + } + } + return ciArtifacts, nil +} func (impl *AppArtifactManagerImpl) FetchArtifactForRollback(cdPipelineId, appId, offset, limit int) (bean2.CiArtifactResponse, error) { var deployedCiArtifacts []bean2.CiArtifactBean var deployedCiArtifactsResponse bean2.CiArtifactResponse @@ -386,3 +411,153 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipeline(pipeline *pipe ciArtifactsResponse.CiArtifacts = ciArtifacts return ciArtifactsResponse, nil } + +func (impl 
*AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType) (*bean2.CiArtifactResponse, error) { + + // retrieve parent details + parentId, parentType, parentCdId, err := impl.extractParentMetaDataByPipeline(pipeline, stage) + if err != nil { + impl.logger.Errorw("error in finding parent meta data for pipeline", "pipelineId", pipeline.Id, "pipelineStage", stage, "err", err) + return nil, err + } + // Build artifacts for cd stages + var ciArtifacts []bean2.CiArtifactBean + ciArtifactsResponse := &bean2.CiArtifactResponse{} + + artifactMap := make(map[int]int) + limit := 10 + + ciArtifacts, artifactMap, latestWfArtifactId, latestWfArtifactStatus, err := impl. + BuildArtifactsForCdStage(pipeline.Id, stage, ciArtifacts, artifactMap, false, limit, parentCdId) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting artifacts for child cd stage", "err", err, "stage", stage) + return nil, err + } + + ciArtifacts, err = impl.BuildArtifactsForParentStage(pipeline.Id, parentId, parentType, ciArtifacts, artifactMap, limit, parentCdId) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting artifacts for cd", "err", err, "parentStage", parentType, "stage", stage) + return nil, err + } + + //sorting ci artifacts on the basis of creation time + if ciArtifacts != nil { + sort.SliceStable(ciArtifacts, func(i, j int) bool { + return ciArtifacts[i].Id > ciArtifacts[j].Id + }) + } + + artifactIds := make([]int, 0, len(ciArtifacts)) + for _, artifact := range ciArtifacts { + artifactIds = append(artifactIds, artifact.Id) + } + + artifacts, err := impl.ciArtifactRepository.GetArtifactParentCiAndWorkflowDetailsByIds(artifactIds) + if err != nil { + return ciArtifactsResponse, err + } + imageTagsDataMap, err := impl.imageTaggingService.GetTagsDataMapByAppId(pipeline.AppId) + if err != nil { + impl.logger.Errorw("error in getting image tagging data with appId", "err", err, "appId", 
pipeline.AppId) + return ciArtifactsResponse, err + } + + imageCommentsDataMap, err := impl.imageTaggingService.GetImageCommentsDataMapByArtifactIds(artifactIds) + if err != nil { + impl.logger.Errorw("error in getting GetImageCommentsDataMapByArtifactIds", "err", err, "appId", pipeline.AppId, "artifactIds", artifactIds) + return ciArtifactsResponse, err + } + + for i, artifact := range artifacts { + if imageTaggingResp := imageTagsDataMap[ciArtifacts[i].Id]; imageTaggingResp != nil { + ciArtifacts[i].ImageReleaseTags = imageTaggingResp + } + if imageCommentResp := imageCommentsDataMap[ciArtifacts[i].Id]; imageCommentResp != nil { + ciArtifacts[i].ImageComment = imageCommentResp + } + + if artifact.ExternalCiPipelineId != 0 { + // if external webhook continue + continue + } + + var ciWorkflow *pipelineConfig.CiWorkflow + if artifact.ParentCiArtifact != 0 { + ciWorkflow, err = impl.ciWorkflowRepository.FindLastTriggeredWorkflowGitTriggersByArtifactId(artifact.ParentCiArtifact) + if err != nil { + impl.logger.Errorw("error in getting ci_workflow for artifacts", "err", err, "artifact", artifact, "parentStage", parentType, "stage", stage) + return ciArtifactsResponse, err + } + + } else { + ciWorkflow, err = impl.ciWorkflowRepository.FindCiWorkflowGitTriggersById(*artifact.WorkflowId) + if err != nil { + impl.logger.Errorw("error in getting ci_workflow for artifacts", "err", err, "artifact", artifact, "parentStage", parentType, "stage", stage) + return ciArtifactsResponse, err + } + } + ciArtifacts[i].CiConfigureSourceType = ciWorkflow.GitTriggers[ciWorkflow.CiPipelineId].CiConfigureSourceType + ciArtifacts[i].CiConfigureSourceValue = ciWorkflow.GitTriggers[ciWorkflow.CiPipelineId].CiConfigureSourceValue + } + + ciArtifactsResponse.CdPipelineId = pipeline.Id + ciArtifactsResponse.LatestWfArtifactId = latestWfArtifactId + ciArtifactsResponse.LatestWfArtifactStatus = latestWfArtifactStatus + if ciArtifacts == nil { + ciArtifacts = []bean2.CiArtifactBean{} + } + 
ciArtifactsResponse.CiArtifacts = ciArtifacts + return ciArtifactsResponse, nil +} + +func (impl *AppArtifactManagerImpl) extractParentMetaDataByPipeline(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType) (parentId int, parentType bean.WorkflowType, parentCdId int, err error) { + // retrieve parent details + parentId, parentType, err = impl.cdPipelineConfigService.RetrieveParentDetails(pipeline.Id) + if err != nil { + impl.logger.Errorw("failed to retrieve parent details", + "cdPipelineId", pipeline.Id, + "err", err) + return parentId, parentType, parentCdId, err + } + + if parentType == bean.CD_WORKFLOW_TYPE_POST || (parentType == bean.CD_WORKFLOW_TYPE_DEPLOY && stage != bean.CD_WORKFLOW_TYPE_POST) { + // parentCdId is being set to store the artifact currently deployed on parent cd (if applicable). + // Parent component is CD only if parent type is POST/DEPLOY + parentCdId = parentId + } + + if stage == bean.CD_WORKFLOW_TYPE_DEPLOY { + pipelinePreStage, err := impl.pipelineStageService.GetCdStageByCdPipelineIdAndStageType(pipeline.Id, repository2.PIPELINE_STAGE_TYPE_PRE_CD) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching PRE-CD stage by cd pipeline id", "pipelineId", pipeline.Id, "err", err) + return parentId, parentType, parentCdId, err + } + if (pipelinePreStage != nil && pipelinePreStage.Id != 0) || len(pipeline.PreStageConfig) > 0 { + // Parent type will be PRE for DEPLOY stage + parentId = pipeline.Id + parentType = bean.CD_WORKFLOW_TYPE_PRE + } + } + if stage == bean.CD_WORKFLOW_TYPE_POST { + // Parent type will be DEPLOY for POST stage + parentId = pipeline.Id + parentType = bean.CD_WORKFLOW_TYPE_DEPLOY + } + return parentId, parentType, parentCdId, err +} + + + +func (impl *AppArtifactManagerImpl) BuildArtifacts(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]bean2.CiArtifactBean, error) { + + //1)get current deployed artifact on this pipeline + + //2) get artifact list limited by filterOptions + if 
listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE || listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { + impl.BuildArtifactsForCIParentV2(listingFilterOpts) + } else { + impl.cdWorkflowRepository. + } + + //got all the artifacts, process them +} From 4f803658fedf20381b57543f67fcc310addad3ea Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Thu, 19 Oct 2023 14:07:25 +0530 Subject: [PATCH 036/143] wip --- .../sql/repository/CiArtifactRepository.go | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 3981b63c6a..97177b7d03 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -247,12 +247,12 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt " FROM ci_artifact cia" + " INNER JOIN ci_pipeline cp ON cp.id=cia.pipeline_id" + " INNER JOIN pipeline p ON p.ci_pipeline_id = cp.id and p.id=?" + - " WHERE cia.image LIKE %?%" + + " WHERE cia.image ILIKE %?%" + " ORDER BY cia.id DESC" + " LIMIT ?" + " OFFSET ?;" - _, err := impl.dbConnection.Query(&artifacts, query) + _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.PipelineId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) if err != nil { return artifacts, err } @@ -261,11 +261,11 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt query := " SELECT cia.* " + " FROM ci_artifact cia " + " WHERE cia.external_ci_pipeline_id = webhook_id " + - " AND cia.image LIKE %?% " + + " AND cia.image ILIKE %?% " + " ORDER BY cia.id DESC " + " LIMIT ? 
" + " OFFSET ?;" - _, err := impl.dbConnection.Query(&artifacts, query) + _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) if err != nil { return artifacts, err } @@ -273,6 +273,9 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt return artifacts, nil } + if len(artifacts) == 0 { + return artifacts, nil + } //processing artifactsIds := make([]int, 0, len(artifacts)) for _, artifact := range artifacts { @@ -280,14 +283,21 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt } //(this will fetch all the artifacts that were deployed on the given pipeline atleast once in new->old deployed order) - + artifactsDeployed := make([]*CiArtifact, 0, len(artifactsIds)) query := " SELECT cia.id,pco.created_on AS created_on " + " FROM ci_artifact cia" + " INNER JOIN pipeline_config_override pco ON pco.ci_artifact_id=cia.id" + " WHERE pco.pipeline_id = ? " + - " AND cia.id IN () " + + " AND cia.id IN (?) 
" + " ORDER BY pco.id desc;" + _, err := impl.dbConnection.Query(&artifactsDeployed, query, pg.In(artifactsIds)) + if err != nil { + return artifacts, nil + } + + //check deploy time + } func (impl CiArtifactRepositoryImpl) GetLatestArtifactTimeByCiPipelineIds(ciPipelineIds []int) ([]*CiArtifact, error) { From 29d5e8cc9bde2803d0a150c84bc568f80e5fe7e1 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Thu, 19 Oct 2023 14:43:30 +0530 Subject: [PATCH 037/143] ci type parent artifats fetched --- internal/sql/repository/CiArtifactRepository.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 97177b7d03..df8d44a3ee 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -277,8 +277,10 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt return artifacts, nil } //processing + artifactsMap := make(map[int]*CiArtifact) artifactsIds := make([]int, 0, len(artifacts)) for _, artifact := range artifacts { + artifactsMap[artifact.Id] = artifact artifactsIds = append(artifactsIds, artifact.Id) } @@ -296,7 +298,20 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt return artifacts, nil } - //check deploy time + //set deployed time and latest deployed artifact + for i, deployedArtifact := range artifactsDeployed { + artifactId := deployedArtifact.Id + if _, ok := artifactsMap[artifactId]; ok { + artifactsMap[artifactId].Deployed = true + artifactsMap[artifactId].DeployedTime = deployedArtifact.CreatedOn + if i == 0 { + artifactsMap[artifactId].Latest = true + + } + } + } + + return artifacts, nil } From 6beb311585808df72443e9aee94402b2ba11c98d Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Thu, 19 Oct 2023 14:47:01 +0530 Subject: [PATCH 038/143] commented not required data --- 
.../sql/repository/CiArtifactRepository.go | 76 ++++++++++--------- 1 file changed, 39 insertions(+), 37 deletions(-) diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index df8d44a3ee..0c0e665ad9 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -273,43 +273,45 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt return artifacts, nil } - if len(artifacts) == 0 { - return artifacts, nil - } - //processing - artifactsMap := make(map[int]*CiArtifact) - artifactsIds := make([]int, 0, len(artifacts)) - for _, artifact := range artifacts { - artifactsMap[artifact.Id] = artifact - artifactsIds = append(artifactsIds, artifact.Id) - } - - //(this will fetch all the artifacts that were deployed on the given pipeline atleast once in new->old deployed order) - artifactsDeployed := make([]*CiArtifact, 0, len(artifactsIds)) - query := " SELECT cia.id,pco.created_on AS created_on " + - " FROM ci_artifact cia" + - " INNER JOIN pipeline_config_override pco ON pco.ci_artifact_id=cia.id" + - " WHERE pco.pipeline_id = ? " + - " AND cia.id IN (?) 
" + - " ORDER BY pco.id desc;" - - _, err := impl.dbConnection.Query(&artifactsDeployed, query, pg.In(artifactsIds)) - if err != nil { - return artifacts, nil - } - - //set deployed time and latest deployed artifact - for i, deployedArtifact := range artifactsDeployed { - artifactId := deployedArtifact.Id - if _, ok := artifactsMap[artifactId]; ok { - artifactsMap[artifactId].Deployed = true - artifactsMap[artifactId].DeployedTime = deployedArtifact.CreatedOn - if i == 0 { - artifactsMap[artifactId].Latest = true - - } - } - } + //Currently below computed data is not being used anywhere, if required use it + + //if len(artifacts) == 0 { + // return artifacts, nil + //} + ////processing + //artifactsMap := make(map[int]*CiArtifact) + //artifactsIds := make([]int, 0, len(artifacts)) + //for _, artifact := range artifacts { + // artifactsMap[artifact.Id] = artifact + // artifactsIds = append(artifactsIds, artifact.Id) + //} + // + ////(this will fetch all the artifacts that were deployed on the given pipeline atleast once in new->old deployed order) + //artifactsDeployed := make([]*CiArtifact, 0, len(artifactsIds)) + //query := " SELECT cia.id,pco.created_on AS created_on " + + // " FROM ci_artifact cia" + + // " INNER JOIN pipeline_config_override pco ON pco.ci_artifact_id=cia.id" + + // " WHERE pco.pipeline_id = ? " + + // " AND cia.id IN (?) 
" + + // " ORDER BY pco.id desc;" + // + //_, err := impl.dbConnection.Query(&artifactsDeployed, query, pg.In(artifactsIds)) + //if err != nil { + // return artifacts, nil + //} + // + ////set deployed time and latest deployed artifact + //for i, deployedArtifact := range artifactsDeployed { + // artifactId := deployedArtifact.Id + // if _, ok := artifactsMap[artifactId]; ok { + // artifactsMap[artifactId].Deployed = true + // artifactsMap[artifactId].DeployedTime = deployedArtifact.CreatedOn + // if i == 0 { + // artifactsMap[artifactId].Latest = true + // + // } + // } + //} return artifacts, nil From 3a686877231d1c3ed208ded2bc8931a1e0d26ae1 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 19 Oct 2023 14:52:04 +0530 Subject: [PATCH 039/143] custom tag cd and in trigger api --- pkg/CustomTagService.go | 2 + pkg/bean/app.go | 3 + pkg/pipeline/CiCdPipelineOrchestrator.go | 30 ++--- pkg/pipeline/CiService.go | 38 +++--- .../DeploymentPipelineConfigService.go | 123 +++++++++++++++++- pkg/pipeline/WorkflowDagExecutor.go | 106 ++++++++++----- pkg/pipeline/WorkflowUtils.go | 44 ++++--- pkg/plugin/GlobalPluginService.go | 10 ++ pkg/plugin/bean.go | 8 ++ wire_gen.go | 2 +- 10 files changed, 269 insertions(+), 97 deletions(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 7fe7f2c962..b8e067b0d2 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -14,6 +14,8 @@ import ( const ( EntityNull = iota EntityTypeCiPipelineId + EntityTypePreCD + EntityTypePostCD ) const ( diff --git a/pkg/bean/app.go b/pkg/bean/app.go index cf0c84d4b6..6fd01e4b6c 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -25,6 +25,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/pkg/chartRepo/repository" "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/repository" 
"time" ) @@ -559,6 +560,8 @@ type CDPipelineConfigObject struct { ManifestStorageType string `json:"manifestStorageType"` PreDeployStage *bean.PipelineStageDto `json:"preDeployStage,omitempty"` PostDeployStage *bean.PipelineStageDto `json:"postDeployStage,omitempty"` + CustomTagObject *CustomTagData `json:"customTag,omitempty"` + CustomTagStage *repository.PipelineStageType `json:"customTagStage,omitempty"` } type PreStageConfigMapSecretNames struct { diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index d571ba0598..e0936e50c0 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -26,8 +26,8 @@ import ( "encoding/json" "errors" "fmt" - bean4 "github.com/devtron-labs/devtron/api/bean" util3 "github.com/devtron-labs/common-lib/utils/k8s" + bean4 "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/client/gitSensor" app2 "github.com/devtron-labs/devtron/internal/sql/repository/app" dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" @@ -71,7 +71,7 @@ type CiCdPipelineOrchestrator interface { UpdateMaterial(updateMaterialRequest *bean.UpdateMaterialDTO) (*bean.UpdateMaterialDTO, error) CreateCiConf(createRequest *bean.CiConfigRequest, templateId int) (*bean.CiConfigRequest, error) CreateCDPipelines(pipelineRequest *bean.CDPipelineConfigObject, appId int, userId int32, tx *pg.Tx, appName string) (pipelineId int, err error) - UpdateCDPipeline(pipelineRequest *bean.CDPipelineConfigObject, userId int32, tx *pg.Tx) (err error) + UpdateCDPipeline(pipelineRequest *bean.CDPipelineConfigObject, userId int32, tx *pg.Tx) (pipeline *pipelineConfig.Pipeline, err error) DeleteCiPipeline(pipeline *pipelineConfig.CiPipeline, request *bean.CiPatchRequest, tx *pg.Tx) error DeleteCdPipeline(pipelineId int, userId int32, tx 
*pg.Tx) error PatchMaterialValue(createRequest *bean.CiPipeline, userId int32, oldPipeline *pipelineConfig.CiPipeline) (*bean.CiPipeline, error) @@ -1515,14 +1515,14 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCDPipelines(pipelineRequest *bean return pipeline.Id, nil } -func (impl CiCdPipelineOrchestratorImpl) UpdateCDPipeline(pipelineRequest *bean.CDPipelineConfigObject, userId int32, tx *pg.Tx) (err error) { - pipeline, err := impl.pipelineRepository.FindById(pipelineRequest.Id) +func (impl CiCdPipelineOrchestratorImpl) UpdateCDPipeline(pipelineRequest *bean.CDPipelineConfigObject, userId int32, tx *pg.Tx) (pipeline *pipelineConfig.Pipeline, err error) { + pipeline, err = impl.pipelineRepository.FindById(pipelineRequest.Id) if err == pg.ErrNoRows { - return fmt.Errorf("no cd pipeline found") + return pipeline, fmt.Errorf("no cd pipeline found") } else if err != nil { - return err + return pipeline, err } else if pipeline.Id == 0 { - return fmt.Errorf("no cd pipeline found") + return pipeline, fmt.Errorf("no cd pipeline found") } preStageConfig := "" preTriggerType := pipelineConfig.TriggerType("") @@ -1547,13 +1547,13 @@ func (impl CiCdPipelineOrchestratorImpl) UpdateCDPipeline(pipelineRequest *bean. preStageConfigMapSecretNames, err := json.Marshal(&pipelineRequest.PreStageConfigMapSecretNames) if err != nil { impl.logger.Error(err) - return err + return pipeline, err } postStageConfigMapSecretNames, err := json.Marshal(&pipelineRequest.PostStageConfigMapSecretNames) if err != nil { impl.logger.Error(err) - return err + return pipeline, err } pipeline.TriggerType = pipelineRequest.TriggerType @@ -1570,20 +1570,20 @@ func (impl CiCdPipelineOrchestratorImpl) UpdateCDPipeline(pipelineRequest *bean. 
err = impl.pipelineRepository.Update(pipeline, tx) if err != nil { impl.logger.Errorw("error in updating cd pipeline", "err", err, "pipeline", pipeline) - return err + return pipeline, err } if pipeline.PreStageConfig != "" { err = impl.prePostCdScriptHistoryService.CreatePrePostCdScriptHistory(pipeline, tx, repository4.PRE_CD_TYPE, false, 0, time.Time{}) if err != nil { impl.logger.Errorw("error in creating pre cd script entry", "err", err, "pipeline", pipeline) - return err + return pipeline, err } } if pipeline.PostStageConfig != "" { err = impl.prePostCdScriptHistoryService.CreatePrePostCdScriptHistory(pipeline, tx, repository4.POST_CD_TYPE, false, 0, time.Time{}) if err != nil { impl.logger.Errorw("error in creating post cd script entry", "err", err, "pipeline", pipeline) - return err + return pipeline, err } } @@ -1592,7 +1592,7 @@ func (impl CiCdPipelineOrchestratorImpl) UpdateCDPipeline(pipelineRequest *bean. err = impl.pipelineStageService.UpdatePipelineStage(pipelineRequest.PreDeployStage, repository5.PIPELINE_STAGE_TYPE_PRE_CD, pipelineRequest.Id, userId) if err != nil { impl.logger.Errorw("error in updating pre stage", "err", err, "preDeployStage", pipelineRequest.PreDeployStage, "cdPipelineId", pipelineRequest.Id) - return err + return pipeline, err } } if pipelineRequest.PostDeployStage != nil { @@ -1600,10 +1600,10 @@ func (impl CiCdPipelineOrchestratorImpl) UpdateCDPipeline(pipelineRequest *bean. 
err = impl.pipelineStageService.UpdatePipelineStage(pipelineRequest.PostDeployStage, repository5.PIPELINE_STAGE_TYPE_POST_CD, pipelineRequest.Id, userId) if err != nil { impl.logger.Errorw("error in updating post stage", "err", err, "postDeployStage", pipelineRequest.PostDeployStage, "cdPipelineId", pipelineRequest.Id) - return err + return pipeline, err } } - return err + return pipeline, nil } func (impl CiCdPipelineOrchestratorImpl) DeleteCdPipeline(pipelineId int, userId int32, tx *pg.Tx) error { diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index d542f3f45f..15b39010a0 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -57,23 +57,23 @@ type CiService interface { } type CiServiceImpl struct { - Logger *zap.SugaredLogger - workflowService WorkflowService - ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository - ciWorkflowRepository pipelineConfig.CiWorkflowRepository - ciConfig *CiConfig - eventClient client.EventClient - eventFactory client.EventFactory - mergeUtil *util.MergeUtil - ciPipelineRepository pipelineConfig.CiPipelineRepository - prePostCiScriptHistoryService history.PrePostCiScriptHistoryService - pipelineStageService PipelineStageService - userService user.UserService - ciTemplateService CiTemplateService - appCrudOperationService app.AppCrudOperationService - envRepository repository1.EnvironmentRepository - appRepository appRepository.AppRepository - customTagService pkg.CustomTagService + Logger *zap.SugaredLogger + workflowService WorkflowService + ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository + ciWorkflowRepository pipelineConfig.CiWorkflowRepository + ciConfig *CiConfig + eventClient client.EventClient + eventFactory client.EventFactory + mergeUtil *util.MergeUtil + ciPipelineRepository pipelineConfig.CiPipelineRepository + prePostCiScriptHistoryService history.PrePostCiScriptHistoryService + pipelineStageService PipelineStageService + userService 
user.UserService + ciTemplateService CiTemplateService + appCrudOperationService app.AppCrudOperationService + envRepository repository1.EnvironmentRepository + appRepository appRepository.AppRepository + customTagService pkg.CustomTagService variableSnapshotHistoryService variables.VariableSnapshotHistoryService config *CiConfig } @@ -106,7 +106,7 @@ func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService envRepository: envRepository, appRepository: appRepository, variableSnapshotHistoryService: variableSnapshotHistoryService, - customTagService: customTagService, + customTagService: customTagService, } config, err := GetCiConfig() if err != nil { @@ -241,7 +241,7 @@ func (impl *CiServiceImpl) TriggerCiPipeline(trigger Trigger) (int, error) { } impl.Logger.Debugw("ci triggered", " pipeline ", trigger.PipelineId) - //Save Scoped VariableSnapshot + //Save Scoped VariableSnapsVariableSnapshothot if len(variableSnapshot) > 0 { variableMapBytes, _ := json.Marshal(variableSnapshot) err := impl.variableSnapshotHistoryService.SaveVariableHistoriesForTrigger([]*repository4.VariableSnapshotHistoryBean{{ diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index e826c793d8..e07c354c6f 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -33,6 +33,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/bean" chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" @@ -139,8 +140,8 @@ type CdPipelineConfigServiceImpl struct { 
variableTemplateParser parsers.VariableTemplateParser deploymentConfig *DeploymentServiceTypeConfig application application.ServiceClient - - devtronAppCMCSService DevtronAppCMCSService + customTagService pkg.CustomTagService + devtronAppCMCSService DevtronAppCMCSService } func NewCdPipelineConfigServiceImpl( @@ -173,8 +174,8 @@ func NewCdPipelineConfigServiceImpl( variableTemplateParser parsers.VariableTemplateParser, deploymentConfig *DeploymentServiceTypeConfig, application application.ServiceClient, - - devtronAppCMCSService DevtronAppCMCSService) *CdPipelineConfigServiceImpl { + devtronAppCMCSService DevtronAppCMCSService, + customTagService pkg.CustomTagService) *CdPipelineConfigServiceImpl { return &CdPipelineConfigServiceImpl{ logger: logger, pipelineRepository: pipelineRepository, @@ -206,6 +207,7 @@ func NewCdPipelineConfigServiceImpl( deploymentConfig: deploymentConfig, application: application, devtronAppCMCSService: devtronAppCMCSService, + customTagService: customTagService, } } @@ -383,10 +385,112 @@ func (impl *CdPipelineConfigServiceImpl) CreateCdPipelines(pipelineCreateRequest return nil, err } } + // save custom tag data + err = impl.CDPipelineCustomTagDBOperations(pipeline) + if err != nil { + return nil, err + } + } + return pipelineCreateRequest, nil +} +func (impl *CdPipelineConfigServiceImpl) CDPipelineCustomTagDBOperations(pipeline *bean.CDPipelineConfigObject) error { + if pipeline.CustomTagObject == nil && pipeline.CustomTagStage == nil { + // delete custom tag if removed from request + err := impl.DeleteCustomTag(pipeline) + if err != nil { + return err + } + return nil + } else { + err := impl.SaveOrUpdateCustomTagForCDPipeline(pipeline) + if err != nil { + impl.logger.Errorw("error in creating custom tag for pipeline stage", "err", err) + return err + } + } + if *pipeline.CustomTagStage == repository5.PIPELINE_STAGE_TYPE_POST_CD { + // delete entry for post stage if any + preCDStageName := repository5.PIPELINE_STAGE_TYPE_PRE_CD + err 
:= impl.DeleteCustomTagByPipelineStageType(&preCDStageName, pipeline.Id) + if err != nil { + return err + } + } else if *pipeline.CustomTagStage == repository5.PIPELINE_STAGE_TYPE_PRE_CD { + postCdStageName := repository5.PIPELINE_STAGE_TYPE_POST_CD + err := impl.DeleteCustomTagByPipelineStageType(&postCdStageName, pipeline.Id) + if err != nil { + return err + } } + return nil +} - return pipelineCreateRequest, nil +func (impl *CdPipelineConfigServiceImpl) DeleteCustomTag(pipeline *bean.CDPipelineConfigObject) error { + preStage := repository5.PIPELINE_STAGE_TYPE_PRE_CD + postStage := repository5.PIPELINE_STAGE_TYPE_POST_CD + err := impl.DeleteCustomTagByPipelineStageType(&preStage, pipeline.Id) + if err != nil { + return err + } + err = impl.DeleteCustomTagByPipelineStageType(&postStage, pipeline.Id) + if err != nil { + return err + } + return nil +} + +func (impl *CdPipelineConfigServiceImpl) DeleteCustomTagByPipelineStageType(pipelineStageType *repository5.PipelineStageType, pipelineId int) error { + err := impl.customTagService.DeleteCustomTagIfExists( + bean2.CustomTag{EntityKey: getEntityTypeByPipelineStageType(pipelineStageType), + EntityValue: fmt.Sprintf("%d", pipelineId), + }) + if err != nil { + impl.logger.Errorw("error in deleting custom tag for pre stage", "err", err, "pipeline-id", pipelineId) + return err + } + return nil +} + +func (impl *CdPipelineConfigServiceImpl) SaveOrUpdateCustomTagForCDPipeline(pipeline *bean.CDPipelineConfigObject) error { + customTag, err := impl.ParseCustomTagPatchRequest(pipeline.Id, pipeline.CustomTagObject, pipeline.CustomTagStage) + if err != nil { + impl.logger.Errorw("err", err) + return err + } + err = impl.customTagService.CreateOrUpdateCustomTag(customTag) + if err != nil { + impl.logger.Errorw("error in creating custom tag", "err", err) + return err + } + return nil +} + +func (impl *CdPipelineConfigServiceImpl) ParseCustomTagPatchRequest(pipelineId int, customTagData *bean.CustomTagData, pipelineStageType 
*repository5.PipelineStageType) (*bean2.CustomTag, error) { + entityType := getEntityTypeByPipelineStageType(pipelineStageType) + if entityType == 0 { + return nil, fmt.Errorf("invalid stage for cd pipeline custom tag; pipelineStageType: %s ", string(*pipelineStageType)) + } + customTag := &bean2.CustomTag{ + EntityKey: entityType, + EntityValue: fmt.Sprintf("%d", pipelineId), + TagPattern: customTagData.TagPattern, + AutoIncreasingNumber: customTagData.CounterX, + Metadata: "", + } + return customTag, nil +} + +func getEntityTypeByPipelineStageType(pipelineStageType *repository5.PipelineStageType) (customTagEntityType int) { + switch *pipelineStageType { + case repository5.PIPELINE_STAGE_TYPE_PRE_CD: + customTagEntityType = pkg.EntityTypePreCD + case repository5.PIPELINE_STAGE_TYPE_POST_CD: + customTagEntityType = pkg.EntityTypePostCD + default: + customTagEntityType = pkg.EntityNull + } + return customTagEntityType } func (impl *CdPipelineConfigServiceImpl) PatchCdPipelines(cdPipelines *bean.CDPatchRequest, ctx context.Context) (*bean.CdPipelines, error) { @@ -1548,7 +1652,7 @@ func (impl *CdPipelineConfigServiceImpl) updateCdPipeline(ctx context.Context, p } // Rollback tx on error. 
defer tx.Rollback() - err = impl.ciCdPipelineOrchestrator.UpdateCDPipeline(pipeline, userID, tx) + dbPipelineObj, err := impl.ciCdPipelineOrchestrator.UpdateCDPipeline(pipeline, userID, tx) if err != nil { impl.logger.Errorw("error in updating pipeline") return err @@ -1630,6 +1734,13 @@ func (impl *CdPipelineConfigServiceImpl) updateCdPipeline(ctx context.Context, p } } } + // update custom tag data + pipeline.Id = dbPipelineObj.Id // pipeline object is request received from FE + err = impl.SaveOrUpdateCustomTagForCDPipeline(pipeline) + if err != nil { + impl.logger.Errorw("error in updating custom tag data for pipeline", "err", err) + return err + } err = tx.Commit() if err != nil { return err diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 193a705b63..7a7af416d1 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -25,10 +25,12 @@ import ( util5 "github.com/devtron-labs/common-lib/utils/k8s" "github.com/devtron-labs/common-lib/utils/k8s/health" gitSensorClient "github.com/devtron-labs/devtron/client/gitSensor" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app/status" "github.com/devtron-labs/devtron/pkg/k8s" bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" repository4 "github.com/devtron-labs/devtron/pkg/pipeline/repository" + "github.com/devtron-labs/devtron/pkg/plugin" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/variables" repository5 "github.com/devtron-labs/devtron/pkg/variables/repository" @@ -83,42 +85,43 @@ type WorkflowDagExecutor interface { } type WorkflowDagExecutorImpl struct { - logger *zap.SugaredLogger - pipelineRepository pipelineConfig.PipelineRepository - 
cdWorkflowRepository pipelineConfig.CdWorkflowRepository - pubsubClient *pubsub.PubSubClientServiceImpl - appService app.AppService - cdWorkflowService WorkflowService - ciPipelineRepository pipelineConfig.CiPipelineRepository - materialRepository pipelineConfig.MaterialRepository - pipelineOverrideRepository chartConfig.PipelineOverrideRepository - ciArtifactRepository repository.CiArtifactRepository - user user.UserService - enforcer casbin.Enforcer - enforcerUtil rbac.EnforcerUtil - groupRepository repository.DeploymentGroupRepository - tokenCache *util3.TokenCache - acdAuthConfig *util3.ACDAuthConfig - envRepository repository2.EnvironmentRepository - eventFactory client.EventFactory - eventClient client.EventClient - cvePolicyRepository security.CvePolicyRepository - scanResultRepository security.ImageScanResultRepository - appWorkflowRepository appWorkflow.AppWorkflowRepository - prePostCdScriptHistoryService history2.PrePostCdScriptHistoryService - argoUserService argo.ArgoUserService - cdPipelineStatusTimelineRepo pipelineConfig.PipelineStatusTimelineRepository - pipelineStatusTimelineService status.PipelineStatusTimelineService - CiTemplateRepository pipelineConfig.CiTemplateRepository - ciWorkflowRepository pipelineConfig.CiWorkflowRepository - appLabelRepository pipelineConfig.AppLabelRepository - gitSensorGrpcClient gitSensorClient.Client - k8sCommonService k8s.K8sCommonService - pipelineStageRepository repository4.PipelineStageRepository - pipelineStageService PipelineStageService - config *CdConfig - + logger *zap.SugaredLogger + pipelineRepository pipelineConfig.PipelineRepository + cdWorkflowRepository pipelineConfig.CdWorkflowRepository + pubsubClient *pubsub.PubSubClientServiceImpl + appService app.AppService + cdWorkflowService WorkflowService + ciPipelineRepository pipelineConfig.CiPipelineRepository + materialRepository pipelineConfig.MaterialRepository + pipelineOverrideRepository chartConfig.PipelineOverrideRepository + ciArtifactRepository 
repository.CiArtifactRepository + user user.UserService + enforcer casbin.Enforcer + enforcerUtil rbac.EnforcerUtil + groupRepository repository.DeploymentGroupRepository + tokenCache *util3.TokenCache + acdAuthConfig *util3.ACDAuthConfig + envRepository repository2.EnvironmentRepository + eventFactory client.EventFactory + eventClient client.EventClient + cvePolicyRepository security.CvePolicyRepository + scanResultRepository security.ImageScanResultRepository + appWorkflowRepository appWorkflow.AppWorkflowRepository + prePostCdScriptHistoryService history2.PrePostCdScriptHistoryService + argoUserService argo.ArgoUserService + cdPipelineStatusTimelineRepo pipelineConfig.PipelineStatusTimelineRepository + pipelineStatusTimelineService status.PipelineStatusTimelineService + CiTemplateRepository pipelineConfig.CiTemplateRepository + ciWorkflowRepository pipelineConfig.CiWorkflowRepository + appLabelRepository pipelineConfig.AppLabelRepository + gitSensorGrpcClient gitSensorClient.Client + k8sCommonService k8s.K8sCommonService + pipelineStageRepository repository4.PipelineStageRepository + pipelineStageService PipelineStageService + config *CdConfig + globalPluginService plugin.GlobalPluginService variableSnapshotHistoryService variables.VariableSnapshotHistoryService + pluginInputVariableParser plugin.InputVariableParser } const ( @@ -212,6 +215,8 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi appLabelRepository pipelineConfig.AppLabelRepository, gitSensorGrpcClient gitSensorClient.Client, pipelineStageService PipelineStageService, k8sCommonService k8s.K8sCommonService, variableSnapshotHistoryService variables.VariableSnapshotHistoryService, + globalPluginService plugin.GlobalPluginService, + pluginInputVariableParser plugin.InputVariableParser, ) *WorkflowDagExecutorImpl { wde := &WorkflowDagExecutorImpl{logger: Logger, pipelineRepository: pipelineRepository, @@ -246,6 +251,8 @@ func NewWorkflowDagExecutorImpl(Logger 
*zap.SugaredLogger, pipelineRepository pi k8sCommonService: k8sCommonService, pipelineStageService: pipelineStageService, variableSnapshotHistoryService: variableSnapshotHistoryService, + globalPluginService: globalPluginService, + pluginInputVariableParser: pluginInputVariableParser, } config, err := GetCdConfig() if err != nil { @@ -579,13 +586,26 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * return err } cdStageWorkflowRequest.StageType = PRE + // handling plugin specific logic + skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(plugin.SKOPEO) + for _, step := range cdStageWorkflowRequest.PreCiSteps { + if step.RefPluginId == skopeoRefPluginId { + // for Skopeo plugin parse destination images and save its data in image path reservation table + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, pkg.EntityTypePreCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image) + if err != nil { + impl.logger.Errorw("error in parsing skopeo input variable", "err", err) + return err + } + cdStageWorkflowRequest.RegistryDestinationImageMap = registryDestinationImageMap + cdStageWorkflowRequest.RegistryCredentialMap = registryCredentialMap + } + } _, span = otel.Tracer("orchestrator").Start(ctx, "cdWorkflowService.SubmitWorkflow") cdStageWorkflowRequest.Pipeline = pipeline cdStageWorkflowRequest.Env = env cdStageWorkflowRequest.Type = bean3.CD_WORKFLOW_PIPELINE_TYPE _, err = impl.cdWorkflowService.SubmitWorkflow(cdStageWorkflowRequest) span.End() - err = impl.sendPreStageNotification(ctx, cdWf, pipeline) if err != nil { return err @@ -699,6 +719,20 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor cdStageWorkflowRequest.Pipeline = pipeline cdStageWorkflowRequest.Env = env cdStageWorkflowRequest.Type = bean3.CD_WORKFLOW_PIPELINE_TYPE + // handling plugin specific logic + 
skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(plugin.SKOPEO) + for _, step := range cdStageWorkflowRequest.PostCiSteps { + if step.RefPluginId == skopeoRefPluginId { + // for Skopeo plugin parse destination images and save its data in image path reservation table + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, pkg.EntityTypePostCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image) + if err != nil { + impl.logger.Errorw("error in parsing skopeo input variable", "err", err) + return err + } + cdStageWorkflowRequest.RegistryDestinationImageMap = registryDestinationImageMap + cdStageWorkflowRequest.RegistryCredentialMap = registryCredentialMap + } + } _, err = impl.cdWorkflowService.SubmitWorkflow(cdStageWorkflowRequest) if err != nil { impl.logger.Errorw("error in submitting workflow", "err", err, "cdStageWorkflowRequest", cdStageWorkflowRequest, "pipeline", pipeline, "env", env) diff --git a/pkg/pipeline/WorkflowUtils.go b/pkg/pipeline/WorkflowUtils.go index 4263e40889..e08da6989e 100644 --- a/pkg/pipeline/WorkflowUtils.go +++ b/pkg/pipeline/WorkflowUtils.go @@ -14,6 +14,7 @@ import ( bean3 "github.com/devtron-labs/devtron/pkg/bean" repository2 "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/plugin" "github.com/devtron-labs/devtron/util" v12 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -350,26 +351,29 @@ type WorkflowRequest struct { ImageRetryCount int `json:"imageRetryCount"` ImageRetryInterval int `json:"imageRetryInterval"` // Data from CD Workflow service - WorkflowRunnerId int `json:"workflowRunnerId"` - CdPipelineId int `json:"cdPipelineId"` - StageYaml string `json:"stageYaml"` - ArtifactLocation string 
`json:"artifactLocation"` - CiArtifactDTO CiArtifactDTO `json:"ciArtifactDTO"` - CdImage string `json:"cdImage"` - StageType string `json:"stageType"` - CdCacheLocation string `json:"cdCacheLocation"` - CdCacheRegion string `json:"cdCacheRegion"` - WorkflowPrefixForLog string `json:"workflowPrefixForLog"` - DeploymentTriggeredBy string `json:"deploymentTriggeredBy,omitempty"` - DeploymentTriggerTime time.Time `json:"deploymentTriggerTime,omitempty"` - DeploymentReleaseCounter int `json:"deploymentReleaseCounter,omitempty"` - WorkflowExecutor pipelineConfig.WorkflowExecutorType `json:"workflowExecutor"` - PrePostDeploySteps []*bean.StepObject `json:"prePostDeploySteps"` - CiArtifactLastFetch time.Time `json:"ciArtifactLastFetch"` - Type bean.WorkflowPipelineType - Pipeline *pipelineConfig.Pipeline - Env *repository2.Environment - AppLabels map[string]string + WorkflowRunnerId int `json:"workflowRunnerId"` + CdPipelineId int `json:"cdPipelineId"` + StageYaml string `json:"stageYaml"` + ArtifactLocation string `json:"artifactLocation"` + CiArtifactDTO CiArtifactDTO `json:"ciArtifactDTO"` + CdImage string `json:"cdImage"` + StageType string `json:"stageType"` + CdCacheLocation string `json:"cdCacheLocation"` + CdCacheRegion string `json:"cdCacheRegion"` + WorkflowPrefixForLog string `json:"workflowPrefixForLog"` + DeploymentTriggeredBy string `json:"deploymentTriggeredBy,omitempty"` + DeploymentTriggerTime time.Time `json:"deploymentTriggerTime,omitempty"` + DeploymentReleaseCounter int `json:"deploymentReleaseCounter,omitempty"` + WorkflowExecutor pipelineConfig.WorkflowExecutorType `json:"workflowExecutor"` + PrePostDeploySteps []*bean.StepObject `json:"prePostDeploySteps"` + CiArtifactLastFetch time.Time `json:"ciArtifactLastFetch"` + RegistryDestinationImageMap map[string][]string `json:"registryDestinationImageMap"` + RegistryCredentialMap map[string]plugin.RegistryCredentials `json:"registryCredentialMap"` + + Type bean.WorkflowPipelineType + Pipeline 
*pipelineConfig.Pipeline + Env *repository2.Environment + AppLabels map[string]string } type CiCdTriggerEvent struct { diff --git a/pkg/plugin/GlobalPluginService.go b/pkg/plugin/GlobalPluginService.go index 857f6f86c3..3b727b5569 100644 --- a/pkg/plugin/GlobalPluginService.go +++ b/pkg/plugin/GlobalPluginService.go @@ -19,6 +19,7 @@ type GlobalPluginService interface { GetAllGlobalVariables() ([]*GlobalVariable, error) ListAllPlugins(stageType int) ([]*PluginListComponentDto, error) GetPluginDetailById(pluginId int) (*PluginDetailDto, error) + GetRefPluginIdByRefPluginName(pluginName string) (refPluginId int, err error) } func NewGlobalPluginService(logger *zap.SugaredLogger, globalPluginRepository repository.GlobalPluginRepository) *GlobalPluginServiceImpl { @@ -294,3 +295,12 @@ func getVariableDto(pluginVariable *repository.PluginStepVariable) *PluginVariab ReferenceVariableName: pluginVariable.ReferenceVariableName, } } + +func (impl *GlobalPluginServiceImpl) GetRefPluginIdByRefPluginName(pluginName string) (refPluginId int, err error) { + pluginMetadata, err := impl.globalPluginRepository.GetPluginByName(pluginName) + if err != nil { + impl.logger.Errorw("error in fetching plugin metadata by name", "err", err) + return 0, err + } + return pluginMetadata[0].Id, nil +} diff --git a/pkg/plugin/bean.go b/pkg/plugin/bean.go index 5a7607d3f1..b078b69c56 100644 --- a/pkg/plugin/bean.go +++ b/pkg/plugin/bean.go @@ -37,3 +37,11 @@ type PluginVariableDto struct { VariableStepIndex int `json:"variableStepIndex"` ReferenceVariableName string `json:"referenceVariableName,omitempty"` } + +type RegistryCredentials struct { + Username string `json:"username"` + Password string `json:"password"` + AWSAccessKeyId string `json:"awsAccessKeyId,omitempty"` + AWSSecretAccessKey string `json:"awsSecretAccessKey,omitempty"` + AWSRegion string `json:"awsRegion,omitempty"` +} diff --git a/wire_gen.go b/wire_gen.go index fd82abf011..7638ed8cde 100644 --- a/wire_gen.go +++ b/wire_gen.go 
@@ -486,7 +486,7 @@ func InitializeApp() (*App, error) { return nil, err } devtronAppCMCSServiceImpl := pipeline.NewDevtronAppCMCSServiceImpl(sugaredLogger, appServiceImpl, attributesRepositoryImpl) - cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, gitOpsConfigRepositoryImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, chartDeploymentServiceImpl, chartTemplateServiceImpl, propertiesConfigServiceImpl, appLevelMetricsRepositoryImpl, deploymentTemplateHistoryServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, pipelineDeploymentServiceTypeConfig, applicationServiceClientImpl, devtronAppCMCSServiceImpl) + cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, gitOpsConfigRepositoryImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, chartDeploymentServiceImpl, chartTemplateServiceImpl, propertiesConfigServiceImpl, appLevelMetricsRepositoryImpl, deploymentTemplateHistoryServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, pipelineDeploymentServiceTypeConfig, applicationServiceClientImpl, devtronAppCMCSServiceImpl, customTagServiceImpl) 
appArtifactManagerImpl := pipeline.NewAppArtifactManagerImpl(sugaredLogger, cdWorkflowRepositoryImpl, userServiceImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl, ciWorkflowRepositoryImpl, pipelineStageServiceImpl, cdPipelineConfigServiceImpl) globalStrategyMetadataChartRefMappingRepositoryImpl := chartRepoRepository.NewGlobalStrategyMetadataChartRefMappingRepositoryImpl(db, sugaredLogger) devtronAppStrategyServiceImpl := pipeline.NewDevtronAppStrategyServiceImpl(sugaredLogger, chartRepositoryImpl, globalStrategyMetadataChartRefMappingRepositoryImpl, ciCdPipelineOrchestratorImpl, cdPipelineConfigServiceImpl) From 0d0ed1818e87774a03c61b451fc9a6fccdc2c4e8 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Thu, 19 Oct 2023 18:41:12 +0530 Subject: [PATCH 040/143] almost done, optimise TODOs --- api/bean/ValuesOverrideRequest.go | 5 +- .../sql/repository/CiArtifactRepository.go | 82 +++--- .../pipelineConfig/CdWorfkflowRepository.go | 41 ++- pkg/bean/app.go | 3 + pkg/pipeline/AppArtifactManager.go | 240 ++++++++++++------ 5 files changed, 246 insertions(+), 125 deletions(-) diff --git a/api/bean/ValuesOverrideRequest.go b/api/bean/ValuesOverrideRequest.go index 7896fa396b..51c9deb79a 100644 --- a/api/bean/ValuesOverrideRequest.go +++ b/api/bean/ValuesOverrideRequest.go @@ -99,7 +99,7 @@ type ArtifactsListFilterOptions struct { //list filter data Limit int Offset int - SearchString int + SearchString string Order string //self stage data @@ -110,4 +110,7 @@ type ArtifactsListFilterOptions struct { ParentCdId int ParentId int ParentStageType WorkflowType + + //excludeArtifactIds + ExcludeArtifactIds []int } diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 0c0e665ad9..badf7d8701 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -247,12 +247,13 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt " FROM 
ci_artifact cia" + " INNER JOIN ci_pipeline cp ON cp.id=cia.pipeline_id" + " INNER JOIN pipeline p ON p.ci_pipeline_id = cp.id and p.id=?" + - " WHERE cia.image ILIKE %?%" + + " WHERE cia.id NOT IN (?) " + + " AND cia.image ILIKE %?%" + " ORDER BY cia.id DESC" + " LIMIT ?" + " OFFSET ?;" - _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.PipelineId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) + _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.PipelineId, pg.In(listingFilterOpts.ExcludeArtifactIds), listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) if err != nil { return artifacts, err } @@ -260,12 +261,13 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt } else if listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { query := " SELECT cia.* " + " FROM ci_artifact cia " + - " WHERE cia.external_ci_pipeline_id = webhook_id " + + " WHERE cia.external_ci_pipeline_id = ? " + + " AND cia.id NOT IN (?) " + " AND cia.image ILIKE %?% " + " ORDER BY cia.id DESC " + " LIMIT ? 
" + " OFFSET ?;" - _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) + _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.ParentId, pg.In(listingFilterOpts.ExcludeArtifactIds), listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) if err != nil { return artifacts, err } @@ -273,45 +275,39 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt return artifacts, nil } - //Currently below computed data is not being used anywhere, if required use it - - //if len(artifacts) == 0 { - // return artifacts, nil - //} - ////processing - //artifactsMap := make(map[int]*CiArtifact) - //artifactsIds := make([]int, 0, len(artifacts)) - //for _, artifact := range artifacts { - // artifactsMap[artifact.Id] = artifact - // artifactsIds = append(artifactsIds, artifact.Id) - //} - // - ////(this will fetch all the artifacts that were deployed on the given pipeline atleast once in new->old deployed order) - //artifactsDeployed := make([]*CiArtifact, 0, len(artifactsIds)) - //query := " SELECT cia.id,pco.created_on AS created_on " + - // " FROM ci_artifact cia" + - // " INNER JOIN pipeline_config_override pco ON pco.ci_artifact_id=cia.id" + - // " WHERE pco.pipeline_id = ? " + - // " AND cia.id IN (?) 
" + - // " ORDER BY pco.id desc;" - // - //_, err := impl.dbConnection.Query(&artifactsDeployed, query, pg.In(artifactsIds)) - //if err != nil { - // return artifacts, nil - //} - // - ////set deployed time and latest deployed artifact - //for i, deployedArtifact := range artifactsDeployed { - // artifactId := deployedArtifact.Id - // if _, ok := artifactsMap[artifactId]; ok { - // artifactsMap[artifactId].Deployed = true - // artifactsMap[artifactId].DeployedTime = deployedArtifact.CreatedOn - // if i == 0 { - // artifactsMap[artifactId].Latest = true - // - // } - // } - //} + if len(artifacts) == 0 { + return artifacts, nil + } + //processing + artifactsMap := make(map[int]*CiArtifact) + artifactsIds := make([]int, 0, len(artifacts)) + for _, artifact := range artifacts { + artifactsMap[artifact.Id] = artifact + artifactsIds = append(artifactsIds, artifact.Id) + } + + //(this will fetch all the artifacts that were deployed on the given pipeline atleast once in new->old deployed order) + artifactsDeployed := make([]*CiArtifact, 0, len(artifactsIds)) + query := " SELECT cia.id,pco.created_on AS created_on " + + " FROM ci_artifact cia" + + " INNER JOIN pipeline_config_override pco ON pco.ci_artifact_id=cia.id" + + " WHERE pco.pipeline_id = ? " + + " AND cia.id IN (?) 
" + + " ORDER BY pco.id desc;" + + _, err := impl.dbConnection.Query(&artifactsDeployed, query, pg.In(artifactsIds)) + if err != nil { + return artifacts, nil + } + + //set deployed time and latest deployed artifact + for _, deployedArtifact := range artifactsDeployed { + artifactId := deployedArtifact.Id + if _, ok := artifactsMap[artifactId]; ok { + artifactsMap[artifactId].Deployed = true + artifactsMap[artifactId].DeployedTime = deployedArtifact.CreatedOn + } + } return artifacts, nil diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 352b426612..d73d908b73 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -41,7 +41,7 @@ type CdWorkflowRepository interface { FindCdWorkflowMetaByEnvironmentId(appId int, environmentId int, offset int, size int) ([]CdWorkflowRunner, error) FindCdWorkflowMetaByPipelineId(pipelineId int, offset int, size int) ([]CdWorkflowRunner, error) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) - + FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, error) SaveWorkFlowRunner(wfr *CdWorkflowRunner) (*CdWorkflowRunner, error) UpdateWorkFlowRunner(wfr *CdWorkflowRunner) error UpdateWorkFlowRunnersWithTxn(wfrs []*CdWorkflowRunner, tx *pg.Tx) error @@ -377,6 +377,40 @@ func (impl *CdWorkflowRepositoryImpl) FindCdWorkflowMetaByPipelineId(pipelineId } return wfrList, err } +func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, error) { + var wfrList []CdWorkflowRunner + var wfIds []int + err := impl.dbConnection.Model(&wfIds). + Column("MAX(cd_workflow_runner.id) AS id"). 
+ Join("INNER JOIN cd_workflow ON cd_workflow.id=cd_workflow_runner.cd_workflow_id"). + Join("INNER JOIN ci_artifact cia ON cia.id = cd_workflow.ci_artifact_id"). + Where("(cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ?) OR (cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ? AND cd_workflow_runner.status IN (?))", + listingFilterOptions.PipelineId, + listingFilterOptions.StageType, + listingFilterOptions.ParentId, + listingFilterOptions.ParentStageType, + pg.In([]string{application.Healthy, application.SUCCEEDED})). + Where("cia.image ILIKE %?%", listingFilterOptions.SearchString). + Where("cd_workflow.ci_artifact_id NOT IN (?)", pg.In(listingFilterOptions.ExcludeArtifactIds)). + Group("cd_workflow.ci_artifact_id"). + Limit(listingFilterOptions.Limit). + Offset(listingFilterOptions.Offset). + Select() + + if err == pg.ErrNoRows || len(wfIds) == 0 { + return wfrList, nil + } + err = impl.dbConnection. + Model(&wfrList). + Column("cd_workflow_runner.*", "CdWorkflow", "CdWorkflow.Pipeline", "CdWorkflow.CiArtifact"). + Where("cd_workflow_runner IN (?) ", pg.In(wfIds)). + Select() + + if err == pg.ErrNoRows { + return wfrList, nil + } + return wfrList, err +} func (impl *CdWorkflowRepositoryImpl) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) { var wfrList []CdWorkflowRunner @@ -386,11 +420,6 @@ func (impl *CdWorkflowRepositoryImpl) FindArtifactByPipelineIdAndRunnerType(pipe Where("cd_workflow.pipeline_id = ?", pipelineId). Where("cd_workflow_runner.workflow_type = ?", runnerType). Order("cd_workflow_runner.id DESC"). - //Join("inner join cd_workflow wf on wf.id = cd_workflow_runner.cd_workflow_id"). - //Join("inner join ci_artifact cia on cia.id = wf.ci_artifact_id"). - //Join("inner join pipeline p on p.id = wf.pipeline_id"). - //Join("left join users u on u.id = wfr.triggered_by"). - //Order("ORDER BY cd_workflow_runner.started_on DESC"). Limit(limit). 
Select() if err != nil { diff --git a/pkg/bean/app.go b/pkg/bean/app.go index 29919a091e..fffdb5d9b0 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -721,6 +721,9 @@ type CiArtifactBean struct { CiConfigureSourceValue string `json:"ciConfigureSourceValue"` ImageReleaseTags []*repository2.ImageTag `json:"imageReleaseTags"` ImageComment *repository2.ImageComment `json:"imageComment"` + ExternalCiPipelineId int `json:"-"` + ParentCiArtifact int `json:"-"` + CiWorkflowId int `json:"-"` } type CiArtifactResponse struct { diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 8a5b5914e2..14744b76fa 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -184,31 +184,6 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParent(cdPipelineId int, return ciArtifacts, nil } -func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]bean2.CiArtifactBean, error) { - artifacts, err := impl.ciArtifactRepository.GetArtifactsByCDPipelineV3(listingFilterOpts) - if err != nil { - impl.logger.Errorw("error in getting artifacts for ci", "err", err) - return ciArtifacts, err - } - for _, artifact := range artifacts { - if _, ok := artifactMap[artifact.Id]; !ok { - mInfo, err := parseMaterialInfo([]byte(artifact.MaterialInfo), artifact.DataSource) - if err != nil { - mInfo = []byte("[]") - impl.logger.Errorw("Error in parsing artifact material info", "err", err, "artifact", artifact) - } - ciArtifacts = append(ciArtifacts, bean2.CiArtifactBean{ - Id: artifact.Id, - Image: artifact.Image, - ImageDigest: artifact.ImageDigest, - MaterialInfo: mInfo, - ScanEnabled: artifact.ScanEnabled, - Scanned: artifact.Scanned, - }) - } - } - return ciArtifacts, nil -} func (impl *AppArtifactManagerImpl) FetchArtifactForRollback(cdPipelineId, appId, offset, limit int) (bean2.CiArtifactResponse, error) { var deployedCiArtifacts []bean2.CiArtifactBean var 
deployedCiArtifactsResponse bean2.CiArtifactResponse @@ -412,7 +387,43 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipeline(pipeline *pipe return ciArtifactsResponse, nil } -func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType) (*bean2.CiArtifactResponse, error) { +func (impl *AppArtifactManagerImpl) extractParentMetaDataByPipeline(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType) (parentId int, parentType bean.WorkflowType, parentCdId int, err error) { + // retrieve parent details + parentId, parentType, err = impl.cdPipelineConfigService.RetrieveParentDetails(pipeline.Id) + if err != nil { + impl.logger.Errorw("failed to retrieve parent details", + "cdPipelineId", pipeline.Id, + "err", err) + return parentId, parentType, parentCdId, err + } + + if parentType == bean.CD_WORKFLOW_TYPE_POST || (parentType == bean.CD_WORKFLOW_TYPE_DEPLOY && stage != bean.CD_WORKFLOW_TYPE_POST) { + // parentCdId is being set to store the artifact currently deployed on parent cd (if applicable). 
+ // Parent component is CD only if parent type is POST/DEPLOY + parentCdId = parentId + } + + if stage == bean.CD_WORKFLOW_TYPE_DEPLOY { + pipelinePreStage, err := impl.pipelineStageService.GetCdStageByCdPipelineIdAndStageType(pipeline.Id, repository2.PIPELINE_STAGE_TYPE_PRE_CD) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching PRE-CD stage by cd pipeline id", "pipelineId", pipeline.Id, "err", err) + return parentId, parentType, parentCdId, err + } + if (pipelinePreStage != nil && pipelinePreStage.Id != 0) || len(pipeline.PreStageConfig) > 0 { + // Parent type will be PRE for DEPLOY stage + parentId = pipeline.Id + parentType = bean.CD_WORKFLOW_TYPE_PRE + } + } + if stage == bean.CD_WORKFLOW_TYPE_POST { + // Parent type will be DEPLOY for POST stage + parentId = pipeline.Id + parentType = bean.CD_WORKFLOW_TYPE_DEPLOY + } + return parentId, parentType, parentCdId, err +} + +func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType, artifactListingFilterOpts *bean.ArtifactsListFilterOptions) (*bean2.CiArtifactResponse, error) { // retrieve parent details parentId, parentType, parentCdId, err := impl.extractParentMetaDataByPipeline(pipeline, stage) @@ -424,20 +435,20 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi var ciArtifacts []bean2.CiArtifactBean ciArtifactsResponse := &bean2.CiArtifactResponse{} - artifactMap := make(map[int]int) - limit := 10 + artifactListingFilterOpts.PipelineId = pipeline.Id + artifactListingFilterOpts.ParentId = parentId + artifactListingFilterOpts.ParentCdId = parentCdId + artifactListingFilterOpts.ParentStageType = parentType + artifactListingFilterOpts.StageType = stage - ciArtifacts, artifactMap, latestWfArtifactId, latestWfArtifactStatus, err := impl. 
- BuildArtifactsForCdStage(pipeline.Id, stage, ciArtifacts, artifactMap, false, limit, parentCdId) + ciArtifactsRefs, latestWfArtifactId, latestWfArtifactStatus, err := impl.BuildArtifactsList(artifactListingFilterOpts) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting artifacts for child cd stage", "err", err, "stage", stage) return nil, err } - ciArtifacts, err = impl.BuildArtifactsForParentStage(pipeline.Id, parentId, parentType, ciArtifacts, artifactMap, limit, parentCdId) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in getting artifacts for cd", "err", err, "parentStage", parentType, "stage", stage) - return nil, err + for _, ciArtifactsRef := range ciArtifactsRefs { + ciArtifacts = append(ciArtifacts, *ciArtifactsRef) } //sorting ci artifacts on the basis of creation time @@ -452,10 +463,6 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi artifactIds = append(artifactIds, artifact.Id) } - artifacts, err := impl.ciArtifactRepository.GetArtifactParentCiAndWorkflowDetailsByIds(artifactIds) - if err != nil { - return ciArtifactsResponse, err - } imageTagsDataMap, err := impl.imageTaggingService.GetTagsDataMapByAppId(pipeline.AppId) if err != nil { impl.logger.Errorw("error in getting image tagging data with appId", "err", err, "appId", pipeline.AppId) @@ -468,7 +475,7 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi return ciArtifactsResponse, err } - for i, artifact := range artifacts { + for i, artifact := range ciArtifacts { if imageTaggingResp := imageTagsDataMap[ciArtifacts[i].Id]; imageTaggingResp != nil { ciArtifacts[i].ImageReleaseTags = imageTaggingResp } @@ -481,6 +488,7 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi continue } + //TODO: can be optimised var ciWorkflow *pipelineConfig.CiWorkflow if artifact.ParentCiArtifact != 0 { ciWorkflow, err = 
impl.ciWorkflowRepository.FindLastTriggeredWorkflowGitTriggersByArtifactId(artifact.ParentCiArtifact) @@ -490,7 +498,7 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi } } else { - ciWorkflow, err = impl.ciWorkflowRepository.FindCiWorkflowGitTriggersById(*artifact.WorkflowId) + ciWorkflow, err = impl.ciWorkflowRepository.FindCiWorkflowGitTriggersById(artifact.CiWorkflowId) if err != nil { impl.logger.Errorw("error in getting ci_workflow for artifacts", "err", err, "artifact", artifact, "parentStage", parentType, "stage", stage) return ciArtifactsResponse, err @@ -510,54 +518,136 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi return ciArtifactsResponse, nil } -func (impl *AppArtifactManagerImpl) extractParentMetaDataByPipeline(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType) (parentId int, parentType bean.WorkflowType, parentCdId int, err error) { - // retrieve parent details - parentId, parentType, err = impl.cdPipelineConfigService.RetrieveParentDetails(pipeline.Id) +func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, int, string, error) { + + var ciArtifacts []*bean2.CiArtifactBean + //1)get current deployed artifact on this pipeline + latestWf, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(listingFilterOpts.PipelineId, listingFilterOpts.StageType, 1) if err != nil { - impl.logger.Errorw("failed to retrieve parent details", - "cdPipelineId", pipeline.Id, - "err", err) - return parentId, parentType, parentCdId, err + impl.logger.Errorw("error in getting latest workflow by pipelineId", "pipelineId", listingFilterOpts.PipelineId, "currentStageType", listingFilterOpts.StageType) + return ciArtifacts, 0, "", err } - - if parentType == bean.CD_WORKFLOW_TYPE_POST || (parentType == bean.CD_WORKFLOW_TYPE_DEPLOY && stage != bean.CD_WORKFLOW_TYPE_POST) { - // parentCdId is being set to 
store the artifact currently deployed on parent cd (if applicable). - // Parent component is CD only if parent type is POST/DEPLOY - parentCdId = parentId + if len(latestWf) == 0 { + return ciArtifacts, 0, "", err } + currentRunningArtifact := latestWf[0].CdWorkflow.CiArtifact + listingFilterOpts.ExcludeArtifactIds = []int{currentRunningArtifact.Id} - if stage == bean.CD_WORKFLOW_TYPE_DEPLOY { - pipelinePreStage, err := impl.pipelineStageService.GetCdStageByCdPipelineIdAndStageType(pipeline.Id, repository2.PIPELINE_STAGE_TYPE_PRE_CD) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in fetching PRE-CD stage by cd pipeline id", "pipelineId", pipeline.Id, "err", err) - return parentId, parentType, parentCdId, err + //2) get artifact list limited by filterOptions + if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE || listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { + ciArtifacts, err = impl.BuildArtifactsForCIParentV2(listingFilterOpts) + if err != nil { + impl.logger.Errorw("error in getting ci artifacts for ci/webhook type parent", "pipelineId", listingFilterOpts.PipelineId, "parentPipelineId", listingFilterOpts.ParentId, "parentStageType", listingFilterOpts.ParentStageType, "currentStageType", listingFilterOpts.StageType) + return ciArtifacts, 0, "", err } - if (pipelinePreStage != nil && pipelinePreStage.Id != 0) || len(pipeline.PreStageConfig) > 0 { - // Parent type will be PRE for DEPLOY stage - parentId = pipeline.Id - parentType = bean.CD_WORKFLOW_TYPE_PRE + } else { + ciArtifacts, err = impl.BuildArtifactsForCdStageV2(listingFilterOpts) + if err != nil { + impl.logger.Errorw("error in getting ci artifacts for ci/webhook type parent", "pipelineId", listingFilterOpts.PipelineId, "parentPipelineId", listingFilterOpts.ParentId, "parentStageType", listingFilterOpts.ParentStageType, "currentStageType", listingFilterOpts.StageType) + return ciArtifacts, 0, "", err } } - if stage == bean.CD_WORKFLOW_TYPE_POST { - // 
Parent type will be DEPLOY for POST stage - parentId = pipeline.Id - parentType = bean.CD_WORKFLOW_TYPE_DEPLOY - } - return parentId, parentType, parentCdId, err + + //current deployed artifact is not included in the above computed ciArtifacts, we have to add this too + mInfo, err := parseMaterialInfo([]byte(currentRunningArtifact.MaterialInfo), currentRunningArtifact.DataSource) + if err != nil { + mInfo = []byte("[]") + impl.logger.Errorw("Error in parsing artifact material info", "err", err, "artifact", currentRunningArtifact) + } + currentRunningArtifactBean := &bean2.CiArtifactBean{ + Id: currentRunningArtifact.Id, + Image: currentRunningArtifact.Image, + ImageDigest: currentRunningArtifact.ImageDigest, + MaterialInfo: mInfo, + ScanEnabled: currentRunningArtifact.ScanEnabled, + Scanned: currentRunningArtifact.Scanned, + Deployed: true, + DeployedTime: formatDate(latestWf[0].CdWorkflow.CreatedOn, bean2.LayoutRFC3339), + Latest: true, + } + ciArtifacts = append(ciArtifacts, currentRunningArtifactBean) + return ciArtifacts, currentRunningArtifact.Id, latestWf[0].Status, nil } +func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, error) { + cdWfrList, err := impl.cdWorkflowRepository.FindArtifactByListFilter(listingFilterOpts) + if err != nil { + impl.logger.Errorw("error in fetching cd workflow runners using filter", "filterOptions", listingFilterOpts, "err", err) + return nil, err + } + ciArtifacts := make([]*bean2.CiArtifactBean, 0, len(cdWfrList)) + //get artifact running on parent cd + artifactRunningOnParentCd := 0 + if listingFilterOpts.ParentCdId > 0 { + //TODO: check if we can fetch LastSuccessfulTriggerOnParent wfr along with last running wf + parentCdWfrList, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(listingFilterOpts.ParentCdId, bean.CD_WORKFLOW_TYPE_DEPLOY, 1) + if err != nil || len(parentCdWfrList) == 0 { + impl.logger.Errorw("error in 
getting artifact for parent cd", "parentCdPipelineId", listingFilterOpts.ParentCdId) + return ciArtifacts, err + } + artifactRunningOnParentCd = parentCdWfrList[0].CdWorkflow.CiArtifact.Id + } -func (impl *AppArtifactManagerImpl) BuildArtifacts(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]bean2.CiArtifactBean, error) { + for _, wfr := range cdWfrList { + mInfo, err := parseMaterialInfo([]byte(wfr.CdWorkflow.CiArtifact.MaterialInfo), wfr.CdWorkflow.CiArtifact.DataSource) + if err != nil { + mInfo = []byte("[]") + impl.logger.Errorw("Error in parsing artifact material info", "err", err) + } + ciArtifact := &bean2.CiArtifactBean{ + Id: wfr.CdWorkflow.CiArtifact.Id, + Image: wfr.CdWorkflow.CiArtifact.Image, + ImageDigest: wfr.CdWorkflow.CiArtifact.ImageDigest, + MaterialInfo: mInfo, + //TODO:LastSuccessfulTriggerOnParent + Scanned: wfr.CdWorkflow.CiArtifact.Scanned, + ScanEnabled: wfr.CdWorkflow.CiArtifact.ScanEnabled, + RunningOnParentCd: wfr.CdWorkflow.CiArtifact.Id == artifactRunningOnParentCd, + ExternalCiPipelineId: wfr.CdWorkflow.CiArtifact.ExternalCiPipelineId, + ParentCiArtifact: wfr.CdWorkflow.CiArtifact.ParentCiArtifact, + } + if wfr.CdWorkflow.CiArtifact.WorkflowId != nil { + ciArtifact.CiWorkflowId = *wfr.CdWorkflow.CiArtifact.WorkflowId + } + ciArtifacts = append(ciArtifacts, ciArtifact) + } - //1)get current deployed artifact on this pipeline + return ciArtifacts, nil +} - //2) get artifact list limited by filterOptions - if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE || listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { - impl.BuildArtifactsForCIParentV2(listingFilterOpts) - } else { - impl.cdWorkflowRepository. 
+func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, error) { + + artifacts, err := impl.ciArtifactRepository.GetArtifactsByCDPipelineV3(listingFilterOpts) + if err != nil { + impl.logger.Errorw("error in getting artifacts for ci", "err", err) + return nil, err } - //got all the artifacts, process them + ciArtifacts := make([]*bean2.CiArtifactBean, 0, len(artifacts)) + for _, artifact := range artifacts { + mInfo, err := parseMaterialInfo([]byte(artifact.MaterialInfo), artifact.DataSource) + if err != nil { + mInfo = []byte("[]") + impl.logger.Errorw("Error in parsing artifact material info", "err", err, "artifact", artifact) + } + ciArtifact := &bean2.CiArtifactBean{ + Id: artifact.Id, + Image: artifact.Image, + ImageDigest: artifact.ImageDigest, + MaterialInfo: mInfo, + ScanEnabled: artifact.ScanEnabled, + Scanned: artifact.Scanned, + Deployed: artifact.Deployed, + DeployedTime: formatDate(artifact.DeployedTime, bean2.LayoutRFC3339), + ExternalCiPipelineId: artifact.ExternalCiPipelineId, + ParentCiArtifact: artifact.ParentCiArtifact, + } + if artifact.WorkflowId != nil { + ciArtifact.CiWorkflowId = *artifact.WorkflowId + } + ciArtifacts = append(ciArtifacts, ciArtifact) + } + + return ciArtifacts, nil } From 673570dbf0c24f3a988ffdff619a5e99d3cbdca3 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Thu, 19 Oct 2023 21:50:15 +0530 Subject: [PATCH 041/143] pagination done for OSS --- .../app/DeploymentPipelineRestHandler.go | 4 ++- .../pipelineConfig/CdWorfkflowRepository.go | 5 ++-- .../pipelineConfig/CiWorkflowRepository.go | 27 +++++++++++++++++++ pkg/pipeline/AppArtifactManager.go | 7 ++--- 4 files changed, 37 insertions(+), 6 deletions(-) diff --git a/api/restHandler/app/DeploymentPipelineRestHandler.go b/api/restHandler/app/DeploymentPipelineRestHandler.go index 1d614ff2a3..eec504f365 100644 --- a/api/restHandler/app/DeploymentPipelineRestHandler.go +++ 
b/api/restHandler/app/DeploymentPipelineRestHandler.go @@ -1419,6 +1419,8 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsForRollback(w http.Resp common.WriteJsonResp(w, err, "invalid size", http.StatusBadRequest) return } + + searchString := r.URL.Query().Get("searchString") //rbac block starts from here object := handler.enforcerUtil.GetAppRBACName(app.AppName) if ok := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionGet, object); !ok { @@ -1434,7 +1436,7 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsForRollback(w http.Resp //rbac for edit tags access triggerAccess := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionTrigger, object) - ciArtifactResponse, err := handler.pipelineBuilder.FetchArtifactForRollback(cdPipelineId, app.Id, offset, limit) + ciArtifactResponse, err := handler.pipelineBuilder.FetchArtifactForRollback(cdPipelineId, app.Id, offset, limit, searchString) if err != nil { handler.Logger.Errorw("service err, GetArtifactsForRollback", "err", err, "cdPipelineId", cdPipelineId) common.WriteJsonResp(w, err, "unable to fetch artifacts", http.StatusInternalServerError) diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index d73d908b73..e46681e47e 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -69,7 +69,7 @@ type CdWorkflowRepository interface { FetchAllCdStagesLatestEntityStatus(wfrIds []int) ([]*CdWorkflowRunner, error) ExistsByStatus(status string) (bool, error) - FetchArtifactsByCdPipelineId(pipelineId int, runnerType bean.WorkflowType, offset, limit int) ([]CdWorkflowRunner, error) + FetchArtifactsByCdPipelineId(pipelineId int, runnerType bean.WorkflowType, offset, limit int, searchString string) ([]CdWorkflowRunner, error) 
GetLatestTriggersOfHelmPipelinesStuckInNonTerminalStatuses(getPipelineDeployedWithinHours int) ([]*CdWorkflowRunner, error) } @@ -600,13 +600,14 @@ func (impl *CdWorkflowRepositoryImpl) ExistsByStatus(status string) (bool, error return exists, err } -func (impl *CdWorkflowRepositoryImpl) FetchArtifactsByCdPipelineId(pipelineId int, runnerType bean.WorkflowType, offset, limit int) ([]CdWorkflowRunner, error) { +func (impl *CdWorkflowRepositoryImpl) FetchArtifactsByCdPipelineId(pipelineId int, runnerType bean.WorkflowType, offset, limit int, searchString string) ([]CdWorkflowRunner, error) { var wfrList []CdWorkflowRunner err := impl.dbConnection. Model(&wfrList). Column("cd_workflow_runner.*", "CdWorkflow", "CdWorkflow.Pipeline", "CdWorkflow.CiArtifact"). Where("cd_workflow.pipeline_id = ?", pipelineId). Where("cd_workflow_runner.workflow_type = ?", runnerType). + Where("ci_artifact.image ILIKE %?%", searchString). Order("cd_workflow_runner.id DESC"). Limit(limit).Offset(offset). Select() diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index d713b5751c..c10e22737b 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -36,12 +36,14 @@ type CiWorkflowRepository interface { FindById(id int) (*CiWorkflow, error) FindRetriedWorkflowCountByReferenceId(id int) (int, error) FindCiWorkflowGitTriggersById(id int) (workflow *CiWorkflow, err error) + FindCiWorkflowGitTriggersByIds(ids []int) ([]*CiWorkflow, error) FindByName(name string) (*CiWorkflow, error) FindLastTriggeredWorkflowByCiIds(pipelineId []int) (ciWorkflow []*CiWorkflow, err error) FindLastTriggeredWorkflowByArtifactId(ciArtifactId int) (ciWorkflow *CiWorkflow, err error) FindAllLastTriggeredWorkflowByArtifactId(ciArtifactId []int) (ciWorkflow []*CiWorkflow, err error) FindLastTriggeredWorkflowGitTriggersByArtifactId(ciArtifactId 
int) (ciWorkflow *CiWorkflow, err error) + FindLastTriggeredWorkflowGitTriggersByArtifactIds(ciArtifactIds []int) ([]*CiWorkflow, error) ExistsByStatus(status string) (bool, error) FindBuildTypeAndStatusDataOfLast1Day() []*BuildTypeCount FIndCiWorkflowStatusesByAppId(appId int) ([]*CiWorkflowStatus, error) @@ -218,6 +220,18 @@ func (impl *CiWorkflowRepositoryImpl) FindCiWorkflowGitTriggersById(id int) (ciW return workflow, err } +func (impl *CiWorkflowRepositoryImpl) FindCiWorkflowGitTriggersByIds(ids []int) ([]*CiWorkflow, error) { + workflows := make([]*CiWorkflow, 0) + if len(ids) == 0 { + return workflows, nil + } + err := impl.dbConnection.Model(&workflows). + Column("ci_workflow.git_triggers"). + Where("ci_workflow.id IN (?)", pg.In(ids)). + Select() + + return workflows, err +} func (impl *CiWorkflowRepositoryImpl) SaveWorkFlowConfig(config *CiWorkflowConfig) error { err := impl.dbConnection.Insert(config) return err @@ -278,6 +292,19 @@ func (impl *CiWorkflowRepositoryImpl) FindLastTriggeredWorkflowGitTriggersByArti return workflow, err } +func (impl *CiWorkflowRepositoryImpl) FindLastTriggeredWorkflowGitTriggersByArtifactIds(ciArtifactIds []int) ([]*CiWorkflow, error) { + workflows := make([]*CiWorkflow, 0) + if len(ciArtifactIds) == 0 { + return workflows, nil + } + err := impl.dbConnection.Model(&workflows). + Column("ci_workflow.git_triggers"). + Join("inner join ci_artifact cia on cia.ci_workflow_id = ci_workflow.id"). + Where("cia.id IN (?)", pg.In(ciArtifactIds)). + Select() + return workflows, err +} + func (impl *CiWorkflowRepositoryImpl) ExistsByStatus(status string) (bool, error) { exists, err := impl.dbConnection.Model(&CiWorkflow{}). Where("status =?", status). 
diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 14744b76fa..668f85bbf5 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -35,7 +35,7 @@ type AppArtifactManager interface { RetrieveArtifactsByCDPipeline(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType) (*bean2.CiArtifactResponse, error) //FetchArtifactForRollback : - FetchArtifactForRollback(cdPipelineId, appId, offset, limit int) (bean2.CiArtifactResponse, error) + FetchArtifactForRollback(cdPipelineId, appId, offset, limit int, searchString string) (bean2.CiArtifactResponse, error) BuildArtifactsForCdStage(pipelineId int, stageType bean.WorkflowType, ciArtifacts []bean2.CiArtifactBean, artifactMap map[int]int, parent bool, limit int, parentCdId int) ([]bean2.CiArtifactBean, map[int]int, int, string, error) @@ -184,11 +184,11 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParent(cdPipelineId int, return ciArtifacts, nil } -func (impl *AppArtifactManagerImpl) FetchArtifactForRollback(cdPipelineId, appId, offset, limit int) (bean2.CiArtifactResponse, error) { +func (impl *AppArtifactManagerImpl) FetchArtifactForRollback(cdPipelineId, appId, offset, limit int, searchString string) (bean2.CiArtifactResponse, error) { var deployedCiArtifacts []bean2.CiArtifactBean var deployedCiArtifactsResponse bean2.CiArtifactResponse - cdWfrs, err := impl.cdWorkflowRepository.FetchArtifactsByCdPipelineId(cdPipelineId, bean.CD_WORKFLOW_TYPE_DEPLOY, offset, limit) + cdWfrs, err := impl.cdWorkflowRepository.FetchArtifactsByCdPipelineId(cdPipelineId, bean.CD_WORKFLOW_TYPE_DEPLOY, offset, limit, searchString) if err != nil { impl.logger.Errorw("error in getting artifacts for rollback by cdPipelineId", "err", err, "cdPipelineId", cdPipelineId) return deployedCiArtifactsResponse, err @@ -423,6 +423,7 @@ func (impl *AppArtifactManagerImpl) extractParentMetaDataByPipeline(pipeline *pi return parentId, parentType, parentCdId, err } +// 
continue here func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType, artifactListingFilterOpts *bean.ArtifactsListFilterOptions) (*bean2.CiArtifactResponse, error) { // retrieve parent details From d043980d8a9a90dc892695ac49dea91bd44b140e Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Fri, 20 Oct 2023 01:41:12 +0530 Subject: [PATCH 042/143] calling V2 function in resthandler --- .../app/DeploymentPipelineRestHandler.go | 38 +++++++++++++++++-- .../app/PipelineConfigRestHandler.go | 12 ++++++ pkg/pipeline/AppArtifactManager.go | 3 +- 3 files changed, 49 insertions(+), 4 deletions(-) diff --git a/api/restHandler/app/DeploymentPipelineRestHandler.go b/api/restHandler/app/DeploymentPipelineRestHandler.go index eec504f365..8ce8a5f2da 100644 --- a/api/restHandler/app/DeploymentPipelineRestHandler.go +++ b/api/restHandler/app/DeploymentPipelineRestHandler.go @@ -1164,7 +1164,30 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsByCDPipeline(w http.Res } stage := r.URL.Query().Get("stage") if len(stage) == 0 { - stage = "PRE" + stage = pipeline.WorklowTypePre + } + searchString := "" + search := r.URL.Query().Get("search") + if len(search) != 0 { + searchString = search + } + + offset := 0 + limit := 10 + offsetQueryParam := r.URL.Query().Get("offset") + if offsetQueryParam != "" { + offset, err = strconv.Atoi(offsetQueryParam) + handler.Logger.Errorw("request err, GetArtifactsForRollback", "err", err, "offsetQueryParam", offsetQueryParam) + common.WriteJsonResp(w, err, "invalid offset", http.StatusBadRequest) + return + } + + sizeQueryParam := r.URL.Query().Get("size") + if sizeQueryParam != "" { + limit, err = strconv.Atoi(sizeQueryParam) + handler.Logger.Errorw("request err, GetArtifactsForRollback", "err", err, "sizeQueryParam", sizeQueryParam) + common.WriteJsonResp(w, err, "invalid size", http.StatusBadRequest) + return } handler.Logger.Infow("request payload, 
GetArtifactsByCDPipeline", "cdPipelineId", cdPipelineId, "stage", stage) @@ -1195,8 +1218,17 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsByCDPipeline(w http.Res return } //rbac block ends here - - ciArtifactResponse, err := handler.pipelineBuilder.RetrieveArtifactsByCDPipeline(pipeline, bean2.WorkflowType(stage)) + var ciArtifactResponse *bean.CiArtifactResponse + if handler.pipelineRestHandlerEnvConfig.UseArtifactListApiV2 { + artifactsListFilterOptions := &bean2.ArtifactsListFilterOptions{ + Limit: limit, + Offset: offset, + SearchString: searchString, + } + ciArtifactResponse, err = handler.pipelineBuilder.RetrieveArtifactsByCDPipelineV2(pipeline, bean2.WorkflowType(stage), artifactsListFilterOptions) + } else { + ciArtifactResponse, err = handler.pipelineBuilder.RetrieveArtifactsByCDPipeline(pipeline, bean2.WorkflowType(stage)) + } if err != nil { handler.Logger.Errorw("service err, GetArtifactsByCDPipeline", "err", err, "cdPipelineId", cdPipelineId, "stage", stage) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) diff --git a/api/restHandler/app/PipelineConfigRestHandler.go b/api/restHandler/app/PipelineConfigRestHandler.go index 2060f5b973..693bafedac 100644 --- a/api/restHandler/app/PipelineConfigRestHandler.go +++ b/api/restHandler/app/PipelineConfigRestHandler.go @@ -22,6 +22,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/caarlos0/env" "github.com/devtron-labs/devtron/api/restHandler/common" "github.com/devtron-labs/devtron/client/gitSensor" "github.com/devtron-labs/devtron/internal/sql/repository/helper" @@ -62,6 +63,10 @@ import ( "gopkg.in/go-playground/validator.v9" ) +type PipelineRestHandlerEnvConfig struct { + UseArtifactListApiV2 bool `env:"USE_ARTIFACT_LISTING_API_V2"` +} + type DevtronAppRestHandler interface { CreateApp(w http.ResponseWriter, r *http.Request) DeleteApp(w http.ResponseWriter, r *http.Request) @@ -124,6 +129,7 @@ type 
PipelineConfigRestHandlerImpl struct { argoUserService argo.ArgoUserService imageTaggingService pipeline.ImageTaggingService deploymentTemplateService generateManifest.DeploymentTemplateService + pipelineRestHandlerEnvConfig *PipelineRestHandlerEnvConfig } func NewPipelineRestHandlerImpl(pipelineBuilder pipeline.PipelineBuilder, Logger *zap.SugaredLogger, @@ -148,6 +154,11 @@ func NewPipelineRestHandlerImpl(pipelineBuilder pipeline.PipelineBuilder, Logger scanResultRepository security.ImageScanResultRepository, gitProviderRepo repository.GitProviderRepository, argoUserService argo.ArgoUserService, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, imageTaggingService pipeline.ImageTaggingService) *PipelineConfigRestHandlerImpl { + envConfig := &PipelineRestHandlerEnvConfig{} + err := env.Parse(envConfig) + if err != nil { + Logger.Errorw("error in parsing PipelineRestHandlerEnvConfig", "err", err) + } return &PipelineConfigRestHandlerImpl{ pipelineBuilder: pipelineBuilder, Logger: Logger, @@ -178,6 +189,7 @@ func NewPipelineRestHandlerImpl(pipelineBuilder pipeline.PipelineBuilder, Logger ciPipelineMaterialRepository: ciPipelineMaterialRepository, imageTaggingService: imageTaggingService, deploymentTemplateService: deploymentTemplateService, + pipelineRestHandlerEnvConfig: envConfig, } } diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 668f85bbf5..dac88aacfc 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -34,6 +34,8 @@ type AppArtifactManager interface { //RetrieveArtifactsByCDPipeline : RetrieveArtifactsByCDPipeline returns all the artifacts for the cd pipeline (pre / deploy / post) RetrieveArtifactsByCDPipeline(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType) (*bean2.CiArtifactResponse, error) + RetrieveArtifactsByCDPipelineV2(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType, artifactListingFilterOpts *bean.ArtifactsListFilterOptions) 
(*bean2.CiArtifactResponse, error) + //FetchArtifactForRollback : FetchArtifactForRollback(cdPipelineId, appId, offset, limit int, searchString string) (bean2.CiArtifactResponse, error) @@ -423,7 +425,6 @@ func (impl *AppArtifactManagerImpl) extractParentMetaDataByPipeline(pipeline *pi return parentId, parentType, parentCdId, err } -// continue here func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType, artifactListingFilterOpts *bean.ArtifactsListFilterOptions) (*bean2.CiArtifactResponse, error) { // retrieve parent details From 2bf1cd3f5cde306dbb4148947847d7adf5c6f1ab Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Fri, 20 Oct 2023 10:30:47 +0530 Subject: [PATCH 043/143] bug fix for no deployment triggered on the pipeline and query fix for emptyexclude artifacts ids --- .../sql/repository/CiArtifactRepository.go | 29 +++++---- pkg/pipeline/AppArtifactManager.go | 61 +++++++++++-------- 2 files changed, 52 insertions(+), 38 deletions(-) diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index badf7d8701..6f9c798785 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -242,18 +242,22 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipeline(cdPipelineId, limi func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, error) { artifacts := make([]*CiArtifact, 0, listingFilterOpts.Limit) + commonPaginationQueryPart := " cia.image ILIKE %?%" + + " ORDER BY cia.id DESC" + + " LIMIT ?" + + " OFFSET ?;" if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE { query := " SELECT cia.* " + " FROM ci_artifact cia" + " INNER JOIN ci_pipeline cp ON cp.id=cia.pipeline_id" + " INNER JOIN pipeline p ON p.ci_pipeline_id = cp.id and p.id=?" + - " WHERE cia.id NOT IN (?) 
" + - " AND cia.image ILIKE %?%" + - " ORDER BY cia.id DESC" + - " LIMIT ?" + - " OFFSET ?;" + " WHERE " + if len(listingFilterOpts.ExcludeArtifactIds) > 0 { + query += fmt.Sprintf(" cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) + } + query += commonPaginationQueryPart - _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.PipelineId, pg.In(listingFilterOpts.ExcludeArtifactIds), listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) + _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.PipelineId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) if err != nil { return artifacts, err } @@ -261,13 +265,12 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt } else if listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { query := " SELECT cia.* " + " FROM ci_artifact cia " + - " WHERE cia.external_ci_pipeline_id = ? " + - " AND cia.id NOT IN (?) " + - " AND cia.image ILIKE %?% " + - " ORDER BY cia.id DESC " + - " LIMIT ? " + - " OFFSET ?;" - _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.ParentId, pg.In(listingFilterOpts.ExcludeArtifactIds), listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) + " WHERE cia.external_ci_pipeline_id = ? 
AND " + if len(listingFilterOpts.ExcludeArtifactIds) > 0 { + query += fmt.Sprintf(" cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) + } + query += commonPaginationQueryPart + _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.ParentId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) if err != nil { return artifacts, err } diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index dac88aacfc..386010a1c3 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -523,18 +523,43 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, int, string, error) { var ciArtifacts []*bean2.CiArtifactBean + //1)get current deployed artifact on this pipeline latestWf, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(listingFilterOpts.PipelineId, listingFilterOpts.StageType, 1) - if err != nil { + if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting latest workflow by pipelineId", "pipelineId", listingFilterOpts.PipelineId, "currentStageType", listingFilterOpts.StageType) return ciArtifacts, 0, "", err } - if len(latestWf) == 0 { - return ciArtifacts, 0, "", err - } - currentRunningArtifact := latestWf[0].CdWorkflow.CiArtifact - listingFilterOpts.ExcludeArtifactIds = []int{currentRunningArtifact.Id} + var currentRunningArtifactBean *bean2.CiArtifactBean + currentRunningArtifactId := 0 + currentRunningWorkflowStatus := "" + + //no artifacts deployed on this pipeline yet if latestWf is empty + if len(latestWf) > 0 { + + currentRunningArtifact := latestWf[0].CdWorkflow.CiArtifact + listingFilterOpts.ExcludeArtifactIds = []int{currentRunningArtifact.Id} + currentRunningArtifactId = currentRunningArtifact.Id + 
currentRunningWorkflowStatus = latestWf[0].Status + //current deployed artifact should always be computed, as we have to show it every time + mInfo, err := parseMaterialInfo([]byte(currentRunningArtifact.MaterialInfo), currentRunningArtifact.DataSource) + if err != nil { + mInfo = []byte("[]") + impl.logger.Errorw("Error in parsing artifact material info", "err", err, "artifact", currentRunningArtifact) + } + currentRunningArtifactBean = &bean2.CiArtifactBean{ + Id: currentRunningArtifact.Id, + Image: currentRunningArtifact.Image, + ImageDigest: currentRunningArtifact.ImageDigest, + MaterialInfo: mInfo, + ScanEnabled: currentRunningArtifact.ScanEnabled, + Scanned: currentRunningArtifact.Scanned, + Deployed: true, + DeployedTime: formatDate(latestWf[0].CdWorkflow.CreatedOn, bean2.LayoutRFC3339), + Latest: true, + } + } //2) get artifact list limited by filterOptions if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE || listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { ciArtifacts, err = impl.BuildArtifactsForCIParentV2(listingFilterOpts) @@ -550,25 +575,11 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A } } - //current deployed artifact is not included in the above computed ciArtifacts, we have to add this too - mInfo, err := parseMaterialInfo([]byte(currentRunningArtifact.MaterialInfo), currentRunningArtifact.DataSource) - if err != nil { - mInfo = []byte("[]") - impl.logger.Errorw("Error in parsing artifact material info", "err", err, "artifact", currentRunningArtifact) - } - currentRunningArtifactBean := &bean2.CiArtifactBean{ - Id: currentRunningArtifact.Id, - Image: currentRunningArtifact.Image, - ImageDigest: currentRunningArtifact.ImageDigest, - MaterialInfo: mInfo, - ScanEnabled: currentRunningArtifact.ScanEnabled, - Scanned: currentRunningArtifact.Scanned, - Deployed: true, - DeployedTime: formatDate(latestWf[0].CdWorkflow.CreatedOn, bean2.LayoutRFC3339), - Latest: true, - } - ciArtifacts = 
append(ciArtifacts, currentRunningArtifactBean) - return ciArtifacts, currentRunningArtifact.Id, latestWf[0].Status, nil + //if no artifact deployed skip adding currentRunningArtifactBean in ciArtifacts arr + if currentRunningArtifactBean != nil { + ciArtifacts = append(ciArtifacts, currentRunningArtifactBean) + } + return ciArtifacts, currentRunningArtifactId, currentRunningWorkflowStatus, nil } func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, error) { From d78a09738eb6c73912e8bf1d181beec85271673e Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Fri, 20 Oct 2023 11:24:09 +0530 Subject: [PATCH 044/143] query fix for emptyexclude artifacts ids --- .../pipelineConfig/CdWorfkflowRepository.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index e46681e47e..570c3a7634 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -380,7 +380,7 @@ func (impl *CdWorkflowRepositoryImpl) FindCdWorkflowMetaByPipelineId(pipelineId func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, error) { var wfrList []CdWorkflowRunner var wfIds []int - err := impl.dbConnection.Model(&wfIds). + query := impl.dbConnection.Model(&wfIds). Column("MAX(cd_workflow_runner.id) AS id"). Join("INNER JOIN cd_workflow ON cd_workflow.id=cd_workflow_runner.cd_workflow_id"). Join("INNER JOIN ci_artifact cia ON cia.id = cd_workflow.ci_artifact_id"). @@ -390,12 +390,16 @@ func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOpti listingFilterOptions.ParentId, listingFilterOptions.ParentStageType, pg.In([]string{application.Healthy, application.SUCCEEDED})). 
- Where("cia.image ILIKE %?%", listingFilterOptions.SearchString). - Where("cd_workflow.ci_artifact_id NOT IN (?)", pg.In(listingFilterOptions.ExcludeArtifactIds)). + Where("cia.image ILIKE %?%", listingFilterOptions.SearchString) + if len(listingFilterOptions.ExcludeArtifactIds) > 0 { + query = query.Where("cd_workflow.ci_artifact_id NOT IN (?)", pg.In(listingFilterOptions.ExcludeArtifactIds)) + } + query = query. Group("cd_workflow.ci_artifact_id"). Limit(listingFilterOptions.Limit). - Offset(listingFilterOptions.Offset). - Select() + Offset(listingFilterOptions.Offset) + + err := query.Select() if err == pg.ErrNoRows || len(wfIds) == 0 { return wfrList, nil From 89351cf3b71caf4f25c1c29d09bc6d4ce1b2d945 Mon Sep 17 00:00:00 2001 From: Kripansh Date: Fri, 20 Oct 2023 13:08:48 +0530 Subject: [PATCH 045/143] code review comments --- go.mod | 1 + go.sum | 2 + .../sql/repository/CiArtifactRepository.go | 5 + .../pipelineConfig/CdWorfkflowRepository.go | 4 +- .../DeployementTemplateService_test.go | 1063 +++--- pkg/pipeline/AppArtifactManager.go | 15 +- vendor/github.com/samber/lo/.gitignore | 38 + vendor/github.com/samber/lo/.travis.yml | 7 + vendor/github.com/samber/lo/CHANGELOG.md | 429 +++ vendor/github.com/samber/lo/Dockerfile | 8 + vendor/github.com/samber/lo/LICENSE | 21 + vendor/github.com/samber/lo/Makefile | 44 + vendor/github.com/samber/lo/README.md | 2933 +++++++++++++++++ vendor/github.com/samber/lo/channel.go | 309 ++ vendor/github.com/samber/lo/concurrency.go | 95 + vendor/github.com/samber/lo/condition.go | 150 + vendor/github.com/samber/lo/constraints.go | 6 + vendor/github.com/samber/lo/errors.go | 354 ++ vendor/github.com/samber/lo/find.go | 372 +++ vendor/github.com/samber/lo/func.go | 41 + 
vendor/github.com/samber/lo/intersect.go | 185 ++ vendor/github.com/samber/lo/map.go | 224 ++ vendor/github.com/samber/lo/math.go | 84 + vendor/github.com/samber/lo/retry.go | 290 ++ vendor/github.com/samber/lo/slice.go | 594 ++++ vendor/github.com/samber/lo/string.go | 96 + vendor/github.com/samber/lo/tuples.go | 513 +++ .../github.com/samber/lo/type_manipulation.go | 102 + vendor/github.com/samber/lo/types.go | 123 + vendor/modules.txt | 3 + 30 files changed, 7574 insertions(+), 537 deletions(-) create mode 100644 vendor/github.com/samber/lo/.gitignore create mode 100644 vendor/github.com/samber/lo/.travis.yml create mode 100644 vendor/github.com/samber/lo/CHANGELOG.md create mode 100644 vendor/github.com/samber/lo/Dockerfile create mode 100644 vendor/github.com/samber/lo/LICENSE create mode 100644 vendor/github.com/samber/lo/Makefile create mode 100644 vendor/github.com/samber/lo/README.md create mode 100644 vendor/github.com/samber/lo/channel.go create mode 100644 vendor/github.com/samber/lo/concurrency.go create mode 100644 vendor/github.com/samber/lo/condition.go create mode 100644 vendor/github.com/samber/lo/constraints.go create mode 100644 vendor/github.com/samber/lo/errors.go create mode 100644 vendor/github.com/samber/lo/find.go create mode 100644 vendor/github.com/samber/lo/func.go create mode 100644 vendor/github.com/samber/lo/intersect.go create mode 100644 vendor/github.com/samber/lo/map.go create mode 100644 vendor/github.com/samber/lo/math.go create mode 100644 vendor/github.com/samber/lo/retry.go 
create mode 100644 vendor/github.com/samber/lo/slice.go create mode 100644 vendor/github.com/samber/lo/string.go create mode 100644 vendor/github.com/samber/lo/tuples.go create mode 100644 vendor/github.com/samber/lo/type_manipulation.go create mode 100644 vendor/github.com/samber/lo/types.go diff --git a/go.mod b/go.mod index e7e5df8f94..8838ec5a3b 100644 --- a/go.mod +++ b/go.mod @@ -216,6 +216,7 @@ require ( github.com/prometheus/procfs v0.8.0 // indirect github.com/robfig/cron v1.2.0 // indirect github.com/russross/blackfriday v1.5.2 // indirect + github.com/samber/lo v1.38.1 // indirect github.com/sergi/go-diff v1.1.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect diff --git a/go.sum b/go.sum index 7cd7f50f86..dc937f244d 100644 --- a/go.sum +++ b/go.sum @@ -1038,6 +1038,8 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM= +github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= 
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 6f9c798785..e370688fd5 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -241,12 +241,14 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipeline(cdPipelineId, limi } func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, error) { + //TODO Gireesh: listingFilterOpts.SearchString should be conditional, artifacts := make([]*CiArtifact, 0, listingFilterOpts.Limit) commonPaginationQueryPart := " cia.image ILIKE %?%" + " ORDER BY cia.id DESC" + " LIMIT ?" + " OFFSET ?;" if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE { + //TODO Gireesh: listingFilterOpts.PipelineId is ciPipelineId in this case why are we taking join query := " SELECT cia.* " + " FROM ci_artifact cia" + " INNER JOIN ci_pipeline cp ON cp.id=cia.pipeline_id" + @@ -291,6 +293,7 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt //(this will fetch all the artifacts that were deployed on the given pipeline atleast once in new->old deployed order) artifactsDeployed := make([]*CiArtifact, 0, len(artifactsIds)) + //TODO Gireesh: compare this query plan with cd_workflow & cd_workflow_runner join query Plan, since pco is heavy query := " SELECT cia.id,pco.created_on AS created_on " + " FROM ci_artifact cia" + " INNER JOIN pipeline_config_override pco ON pco.ci_artifact_id=cia.id" + @@ -312,6 +315,8 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt } } + //TODO Gireesh: create separate meaningful functions of these queries + return artifacts, nil } diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go 
b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 570c3a7634..44a982b844 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -380,11 +380,13 @@ func (impl *CdWorkflowRepositoryImpl) FindCdWorkflowMetaByPipelineId(pipelineId func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, error) { var wfrList []CdWorkflowRunner var wfIds []int + //TODO Gireesh: why are we extracting artifacts which belongs to current pipeline as it will impact page size of response ?? query := impl.dbConnection.Model(&wfIds). Column("MAX(cd_workflow_runner.id) AS id"). Join("INNER JOIN cd_workflow ON cd_workflow.id=cd_workflow_runner.cd_workflow_id"). Join("INNER JOIN ci_artifact cia ON cia.id = cd_workflow.ci_artifact_id"). - Where("(cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ?) OR (cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ? AND cd_workflow_runner.status IN (?))", + Where("(cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ?) "+ + "OR (cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ? 
AND cd_workflow_runner.status IN (?))", listingFilterOptions.PipelineId, listingFilterOptions.StageType, listingFilterOptions.ParentId, diff --git a/pkg/generateManifest/DeployementTemplateService_test.go b/pkg/generateManifest/DeployementTemplateService_test.go index ad97dcc5b6..910c85f986 100644 --- a/pkg/generateManifest/DeployementTemplateService_test.go +++ b/pkg/generateManifest/DeployementTemplateService_test.go @@ -1,533 +1,534 @@ package generateManifest -import ( - "context" - "errors" - client2 "github.com/devtron-labs/authenticator/client" - "github.com/devtron-labs/devtron/api/bean" - client "github.com/devtron-labs/devtron/api/helm-app" - mocks4 "github.com/devtron-labs/devtron/api/helm-app/mocks" - "github.com/devtron-labs/devtron/internal/sql/repository" - mocks3 "github.com/devtron-labs/devtron/internal/sql/repository/mocks" - "github.com/devtron-labs/devtron/internal/util" - mocks6 "github.com/devtron-labs/devtron/internal/util/mocks" - mocks2 "github.com/devtron-labs/devtron/pkg/app/mocks" - "github.com/devtron-labs/devtron/pkg/chart" - "github.com/devtron-labs/devtron/pkg/chart/mocks" - chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" - mocks5 "github.com/devtron-labs/devtron/pkg/chartRepo/repository/mocks" - "github.com/devtron-labs/devtron/util/k8s" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "testing" -) - -var K8sUtilObj *k8s.K8sUtil - -func TestDeploymentTemplateServiceImpl_FetchDeploymentsWithChartRefs(t *testing.T) { - defaultVersions := &chart.ChartRefResponse{ - ChartRefs: []chart.ChartRef{ - { - Id: 1, - Version: "v1.0.1", - Name: "Deployment", - Description: "This is a deployment chart", - UserUploaded: 
false, - IsAppMetricsSupported: false, - }, - { - Id: 2, - Version: "v1.0.2", - Name: "Deployment", - Description: "This is a deployment chart", - UserUploaded: false, - IsAppMetricsSupported: false, - }, - { - Id: 3, - Version: "v1.0.3", - Name: "Deployment", - Description: "This is a deployment chart", - UserUploaded: false, - IsAppMetricsSupported: false, - }, - }, - LatestAppChartRef: 2, - LatestEnvChartRef: 2, - } - publishedOnEnvs := []*bean.Environment{ - { - ChartRefId: 2, - EnvironmentId: 1, - EnvironmentName: "devtron-demo", - }, - } - - deployedOnEnv := []*repository.DeploymentTemplateComparisonMetadata{ - { - ChartId: 1, - ChartVersion: "4.18.1", - EnvironmentId: 1, - PipelineConfigOverrideId: 5, - //StartedOn: 2023-08-26T16:36:55.732551Z, - //FinishedOn: 2023-08-26T16:40:00.174576Z, - Status: "Succeeded", - }, - { - ChartId: 1, - ChartVersion: "4.18.1", - EnvironmentId: 1, - PipelineConfigOverrideId: 5, - //StartedOn: 2023-08-26T16:36:55.732551Z, - //FinishedOn: 2023-08-26T16:40:00.174576Z, - Status: "Succeeded", - }, - { - ChartId: 1, - ChartVersion: "4.18.1", - EnvironmentId: 1, - PipelineConfigOverrideId: 5, - //StartedOn: 2023-08-26T16:36:55.732551Z, - //FinishedOn: 2023-08-26T16:40:00.174576Z, - Status: "Succeeded", - }, - } - - deployedOnOtherEnvs := []*repository.DeploymentTemplateComparisonMetadata{ - { - ChartId: 1, - ChartVersion: "4.18.1", - EnvironmentId: 2, - PipelineConfigOverrideId: 9, - }, - } - - type args struct { - appId int - envId int - } - tests := []struct { - name string - args args - want []*repository.DeploymentTemplateComparisonMetadata - wantErr error - }{ - - { - name: "test for successfully fetching the list", - args: args{ - appId: 1, - envId: 1, - }, - want: []*repository.DeploymentTemplateComparisonMetadata{ - { - ChartId: 1, - ChartVersion: "v1.0.1", - ChartType: "Deployment", - EnvironmentId: 0, - EnvironmentName: "", - PipelineConfigOverrideId: 0, - StartedOn: nil, - FinishedOn: nil, - Status: "", - Type: 1, - }, - { 
- ChartId: 2, - ChartVersion: "v1.0.2", - ChartType: "Deployment", - EnvironmentId: 0, - EnvironmentName: "", - PipelineConfigOverrideId: 0, - StartedOn: nil, - FinishedOn: nil, - Status: "", - Type: 1, - }, { - ChartId: 3, - ChartVersion: "v1.0.3", - ChartType: "Deployment", - EnvironmentId: 0, - EnvironmentName: "", - PipelineConfigOverrideId: 0, - StartedOn: nil, - FinishedOn: nil, - Status: "", - Type: 1, - }, { - ChartId: 2, - ChartVersion: "", - ChartType: "", - EnvironmentId: 1, - EnvironmentName: "devtron-demo", - PipelineConfigOverrideId: 0, - StartedOn: nil, - FinishedOn: nil, - Status: "", - Type: 2, - }, { - ChartId: 1, - ChartVersion: "4.18.1", - ChartType: "", - EnvironmentId: 1, - EnvironmentName: "", - PipelineConfigOverrideId: 5, - StartedOn: nil, - FinishedOn: nil, - Status: "Succeeded", - Type: 3, - }, { - ChartId: 1, - ChartVersion: "4.18.1", - ChartType: "", - EnvironmentId: 1, - EnvironmentName: "", - PipelineConfigOverrideId: 5, - StartedOn: nil, - FinishedOn: nil, - Status: "Succeeded", - Type: 3, - }, { - ChartId: 1, - ChartVersion: "4.18.1", - ChartType: "", - EnvironmentId: 1, - EnvironmentName: "", - PipelineConfigOverrideId: 5, - StartedOn: nil, - FinishedOn: nil, - Status: "Succeeded", - Type: 3, - }, { - ChartId: 1, - ChartVersion: "v1.0.1", - ChartType: "Deployment", - EnvironmentId: 2, - EnvironmentName: "", - PipelineConfigOverrideId: 9, - StartedOn: nil, - FinishedOn: nil, - Status: "", - Type: 4, - }, - }, - }, - { - name: "test for error in chart", - args: args{ - appId: 1, - envId: 1, - }, - wantErr: errors.New("error in getting defaultVersions"), - }, - { - name: "test for error in publishedOnEnvs", - args: args{ - appId: 1, - envId: 1, - }, - wantErr: errors.New("error in getting publishedOnEnvs"), - }, - { - name: "test for error in deployedOnEnv", - args: args{ - appId: 1, - envId: 1, - }, - wantErr: errors.New("error in getting deployedOnEnv"), - }, - { - name: "test for error in deployedOnOtherEnvs", - args: args{ - 
appId: 1, - envId: 1, - }, - wantErr: errors.New("error in getting deployedOnOtherEnvs"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - impl, chartService, appListingService, deploymentTemplateRepository, _, _, _, _ := InitEventSimpleFactoryImpl(t) - - if tt.name == "test for successfully fetching the list" { - chartService.On("ChartRefAutocompleteForAppOrEnv", tt.args.appId, 0).Return(defaultVersions, nil) - appListingService.On("FetchMinDetailOtherEnvironment", tt.args.appId).Return(publishedOnEnvs, nil) - deploymentTemplateRepository.On("FetchDeploymentHistoryWithChartRefs", tt.args.appId, tt.args.envId).Return(deployedOnEnv, nil) - deploymentTemplateRepository.On("FetchLatestDeploymentWithChartRefs", tt.args.appId, tt.args.envId).Return(deployedOnOtherEnvs, nil) - } - - if tt.name == "test for error in chart" { - chartService.On("ChartRefAutocompleteForAppOrEnv", tt.args.appId, 0).Return(nil, errors.New("error in getting defaultVersions")) - } - - if tt.name == "test for error in publishedOnEnvs" { - chartService.On("ChartRefAutocompleteForAppOrEnv", tt.args.appId, 0).Return(defaultVersions, nil) - appListingService.On("FetchMinDetailOtherEnvironment", tt.args.appId).Return(nil, errors.New("error in getting publishedOnEnvs")) - } - - if tt.name == "test for error in deployedOnEnv" { - chartService.On("ChartRefAutocompleteForAppOrEnv", tt.args.appId, 0).Return(defaultVersions, nil) - appListingService.On("FetchMinDetailOtherEnvironment", tt.args.appId).Return(publishedOnEnvs, nil) - deploymentTemplateRepository.On("FetchDeploymentHistoryWithChartRefs", tt.args.appId, tt.args.envId).Return(nil, errors.New("error in getting deployedOnEnv")) - } - - if tt.name == "test for error in deployedOnOtherEnvs" { - chartService.On("ChartRefAutocompleteForAppOrEnv", tt.args.appId, 0).Return(defaultVersions, nil) - appListingService.On("FetchMinDetailOtherEnvironment", tt.args.appId).Return(publishedOnEnvs, nil) - 
deploymentTemplateRepository.On("FetchDeploymentHistoryWithChartRefs", tt.args.appId, tt.args.envId).Return(deployedOnEnv, nil) - deploymentTemplateRepository.On("FetchLatestDeploymentWithChartRefs", tt.args.appId, tt.args.envId).Return(deployedOnOtherEnvs, errors.New("error in getting deployedOnOtherEnvs")) - } - - got, err := impl.FetchDeploymentsWithChartRefs(tt.args.appId, tt.args.envId) - - assert.Equal(t, err, tt.wantErr) - - assert.Equal(t, len(got), len(tt.want)) - }) - } -} - -func TestDeploymentTemplateServiceImpl_GetDeploymentTemplate(t *testing.T) { - - var myMap = make(map[string]interface{}) - myString := "{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.
example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"h
ost\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizationPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prom
etheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secret\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" - chart := &chartRepoRepository.Chart{} - chart.GlobalOverride = 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" - chart.Id = 1 - type args struct { - ctx context.Context - request DeploymentTemplateRequest - } - tests := []struct { - name string - - args args - want DeploymentTemplateResponse - wantErr error - }{ - { - name: "get values same as that of request", - args: args{ - ctx: context.Background(), - request: DeploymentTemplateRequest{ - Values: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}", - ValuesAndManifestFlag: Values, - }, - }, - want: DeploymentTemplateResponse{ - Data: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}"}, - }, - { - name: "get values for base charts", - args: args{ - ctx: context.Background(), - request: DeploymentTemplateRequest{ - Values: "", - ValuesAndManifestFlag: Values, - Type: 1, - ChartRefId: 1, - }, - }, - want: DeploymentTemplateResponse{ - Data: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}"}, - }, - { - name: "get values for published on other envs", - args: args{ - ctx: context.Background(), - request: DeploymentTemplateRequest{ - Values: "", - ValuesAndManifestFlag: Values, - Type: 2, - ChartRefId: 1, - AppId: 1, - }, - }, - want: DeploymentTemplateResponse{ - Data: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}"}, - }, - { - name: "get error for published on other envs", - args: args{ - ctx: context.Background(), - request: DeploymentTemplateRequest{ - Values: "", - ValuesAndManifestFlag: Values, - Type: 2, - ChartRefId: 1, - AppId: 1, - }, - }, - wantErr: errors.New("error in getting chart"), - want: DeploymentTemplateResponse{ - Data: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}"}, - }, - { - name: "get values for deployed on envs", - args: args{ - ctx: context.Background(), - request: DeploymentTemplateRequest{ - Values: "", - ValuesAndManifestFlag: Values, - Type: 3, - ChartRefId: 1, - AppId: 1, - PipelineConfigOverrideId: 1, - }, - }, - want: DeploymentTemplateResponse{ - Data: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}"}, - }, - { - name: "get error for deployed on envs", - args: args{ - ctx: context.Background(), - request: DeploymentTemplateRequest{ - Values: "", - ValuesAndManifestFlag: Values, - Type: 3, - ChartRefId: 1, - AppId: 1, - PipelineConfigOverrideId: 1, - }, - }, - wantErr: errors.New("error in getting values"), - want: DeploymentTemplateResponse{ - Data: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}"}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - impl, chartService, _, deploymentTemplateRepository, chartRepository, _, _, _ := InitEventSimpleFactoryImpl(t) - if tt.name == "get values for base charts" { - chartService.On("GetAppOverrideForDefaultTemplate", tt.args.request.ChartRefId).Return(myMap, myString, nil) - } - if tt.name == "get values for published on other envs" { - chartRepository.On("FindLatestChartForAppByAppId", tt.args.request.AppId).Return(chart, nil) - } - - if tt.name == "get error for published on other envs" { - chartRepository.On("FindLatestChartForAppByAppId", tt.args.request.AppId).Return(nil, errors.New("error in getting chart")) - } - - if tt.name == "get values for deployed on envs" { - deploymentTemplateRepository.On("FetchPipelineOverrideValues", tt.args.request.PipelineConfigOverrideId).Return(myString, nil) - } - - if tt.name == "get error for deployed on envs" { - deploymentTemplateRepository.On("FetchPipelineOverrideValues", 
tt.args.request.PipelineConfigOverrideId).Return(myString, errors.New("error in getting values")) - } - - got, err := impl.GetDeploymentTemplate(tt.args.ctx, tt.args.request) - assert.Equal(t, tt.wantErr, err) - if err == nil { - assert.Equal(t, got.Data, tt.want.Data) - } - }) - } -} - -func TestDeploymentTemplateServiceImpl_GetManifest(t *testing.T) { - refChart := "refChart" - template := "template" - version := "version" - myString := "myString" - var chartBytes []byte - - t.Run("TestErrorInGettingRefChart", func(t *testing.T) { - impl, chartService, _, _, _, _, _, _ := InitEventSimpleFactoryImpl(t) - valuesYaml := "{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6961,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.
example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"h
ost\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizationPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prom
etheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secret\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" - ctx := context.Background() - request := chart.TemplateRequest{ChartRefId: 1} - wantErr := errors.New("error in getting refChart") - chartService.On("GetRefChart", request).Return(refChart, template, wantErr, version, myString) - _, gotErr := impl.GetManifest(ctx, 1, valuesYaml) - assert.Equal(t, gotErr, wantErr) - }) - - t.Run("TestManifestGeneration_Success", func(t *testing.T) { - impl, chartService, _, _, _, chartTemplateServiceImpl, helmAppService, helmAppClient := InitEventSimpleFactoryImpl(t) - valuesYaml := 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6962,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" - ctx := context.Background() - request := chart.TemplateRequest{ChartRefId: 2} - var config *client.ClusterConfig - templateChartResponse := &client.TemplateChartResponse{ - GeneratedManifest: "test generated manifest", - } - var zipPath string - chartService.On("GetRefChart", request).Return(refChart, template, nil, version, myString) - chartTemplateServiceImpl.On("LoadChartInBytes", "refChart", false, "", "").Return(chartBytes, zipPath, nil) - helmAppService.On("GetClusterConf", 1).Return(config, nil) - helmAppClient.On("TemplateChart", ctx, mock.AnythingOfType("*client.InstallReleaseRequest")).Return(templateChartResponse, nil) - chartTemplateServiceImpl.On("CleanDir", zipPath) - got, _ := impl.GetManifest(ctx, 2, valuesYaml) - assert.Equal(t, *got.Manifest, templateChartResponse.GeneratedManifest) - }) - - t.Run("TestErrorInGetClusterConf", func(t *testing.T) { - impl, chartService, _, _, _, chartTemplateServiceImpl, helmAppService, _ := InitEventSimpleFactoryImpl(t) - valuesYaml := 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6963,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" - ctx := context.Background() - request := chart.TemplateRequest{ChartRefId: 5} - wantErr1 := errors.New("error in fetching cluster detail") - var zipPath string - chartService.On("GetRefChart", request).Return("refChart5", "template5", nil, "version5", "myString5") - chartTemplateServiceImpl.On("LoadChartInBytes", "refChart5", false, "", "").Return(chartBytes, zipPath, nil) - helmAppService.On("GetClusterConf", 1).Return(nil, errors.New("error in fetching cluster detail")) - chartTemplateServiceImpl.On("CleanDir", zipPath) - _, gotErr1 := impl.GetManifest(ctx, 5, valuesYaml) - assert.Equal(t, gotErr1, wantErr1) - }) - - t.Run("TestErrorInTemplateChart", func(t *testing.T) { - impl, chartService, _, _, _, chartTemplateServiceImpl, helmAppService, helmAppClient := InitEventSimpleFactoryImpl(t) - valuesYaml := 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6963,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" - ctx := context.Background() - request := chart.TemplateRequest{ChartRefId: 5} - var config *client.ClusterConfig - wantErr1 := errors.New("error in templating chart") - var zipPath string - chartService.On("GetRefChart", request).Return("refChart5", "template5", nil, "version5", "myString5") - chartTemplateServiceImpl.On("LoadChartInBytes", "refChart5", false, "", "").Return(chartBytes, zipPath, nil) - helmAppService.On("GetClusterConf", 1).Return(config, nil) - helmAppClient.On("TemplateChart", ctx, mock.AnythingOfType("*client.InstallReleaseRequest")).Return(nil, errors.New("error in templating chart")) - chartTemplateServiceImpl.On("CleanDir", zipPath) - _, gotErr1 := impl.GetManifest(ctx, 5, valuesYaml) - assert.Equal(t, gotErr1, wantErr1) - }) - - t.Run("TestErrorInLoadChartInBytes", func(t *testing.T) { - impl, chartService, _, _, _, chartTemplateServiceImpl, _, _ := InitEventSimpleFactoryImpl(t) - valuesYaml := 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6964,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" - ctx := context.Background() - request := chart.TemplateRequest{ChartRefId: 3} - wantErr := errors.New("error in getting chart") - var zipPath string - chartService.On("GetRefChart", request).Return("refChart1", "template1", nil, "version1", "myString1") - chartTemplateServiceImpl.On("LoadChartInBytes", "refChart1", false, "", "").Return(chartBytes, zipPath, errors.New("error in getting chart")) - _, err := impl.GetManifest(ctx, 3, valuesYaml) - assert.Equal(t, err, wantErr) - }) -} - -func InitEventSimpleFactoryImpl(t *testing.T) (*DeploymentTemplateServiceImpl, *mocks.ChartService, *mocks2.AppListingService, *mocks3.DeploymentTemplateRepository, *mocks5.ChartRepository, *mocks6.ChartTemplateService, *mocks4.HelmAppService, *mocks4.HelmAppClient) { - logger, _ := util.NewSugardLogger() - chartService := mocks.NewChartService(t) - appListingService := mocks2.NewAppListingService(t) - appListingRepository := mocks3.NewAppListingRepository(t) - deploymentTemplateRepository := 
mocks3.NewDeploymentTemplateRepository(t) - helmAppService := mocks4.NewHelmAppService(t) - chartRepository := mocks5.NewChartRepository(t) - chartTemplateServiceImpl := mocks6.NewChartTemplateService(t) - helmAppClient := mocks4.NewHelmAppClient(t) - var k8sUtil *k8s.K8sUtil - if K8sUtilObj != nil { - k8sUtil = K8sUtilObj - } else { - config := &client2.RuntimeConfig{LocalDevMode: true} - k8sUtil = k8s.NewK8sUtil(logger, config) - K8sUtilObj = k8sUtil - } - impl := NewDeploymentTemplateServiceImpl(logger, chartService, appListingService, appListingRepository, deploymentTemplateRepository, helmAppService, chartRepository, chartTemplateServiceImpl, helmAppClient, k8sUtil) - return impl, chartService, appListingService, deploymentTemplateRepository, chartRepository, chartTemplateServiceImpl, helmAppService, helmAppClient -} +// +//import ( +// "context" +// "errors" +// client2 "github.com/devtron-labs/authenticator/client" +// k8s2 "github.com/devtron-labs/common-lib/utils/k8s" +// "github.com/devtron-labs/devtron/api/bean" +// client "github.com/devtron-labs/devtron/api/helm-app" +// mocks4 "github.com/devtron-labs/devtron/api/helm-app/mocks" +// "github.com/devtron-labs/devtron/internal/sql/repository" +// mocks3 "github.com/devtron-labs/devtron/internal/sql/repository/mocks" +// "github.com/devtron-labs/devtron/internal/util" +// mocks6 "github.com/devtron-labs/devtron/internal/util/mocks" +// mocks2 "github.com/devtron-labs/devtron/pkg/app/mocks" +// "github.com/devtron-labs/devtron/pkg/chart" +// "github.com/devtron-labs/devtron/pkg/chart/mocks" +// chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" +// mocks5 "github.com/devtron-labs/devtron/pkg/chartRepo/repository/mocks" +// 
"github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/mock" +// "testing" +//) +// +//var K8sUtilObj *k8s2.K8sUtil +// +//func TestDeploymentTemplateServiceImpl_FetchDeploymentsWithChartRefs(t *testing.T) { +// defaultVersions := &chart.ChartRefResponse{ +// ChartRefs: []chart.ChartRef{ +// { +// Id: 1, +// Version: "v1.0.1", +// Name: "Deployment", +// Description: "This is a deployment chart", +// UserUploaded: false, +// IsAppMetricsSupported: false, +// }, +// { +// Id: 2, +// Version: "v1.0.2", +// Name: "Deployment", +// Description: "This is a deployment chart", +// UserUploaded: false, +// IsAppMetricsSupported: false, +// }, +// { +// Id: 3, +// Version: "v1.0.3", +// Name: "Deployment", +// Description: "This is a deployment chart", +// UserUploaded: false, +// IsAppMetricsSupported: false, +// }, +// }, +// LatestAppChartRef: 2, +// LatestEnvChartRef: 2, +// } +// publishedOnEnvs := []*bean.Environment{ +// { +// ChartRefId: 2, +// EnvironmentId: 1, +// EnvironmentName: "devtron-demo", +// }, +// } +// +// deployedOnEnv := []*repository.DeploymentTemplateComparisonMetadata{ +// { +// ChartId: 1, +// ChartVersion: "4.18.1", +// EnvironmentId: 1, +// //PipelineConfigOverrideId: 5, +// //StartedOn: 2023-08-26T16:36:55.732551Z, +// //FinishedOn: 2023-08-26T16:40:00.174576Z, +// Status: "Succeeded", +// }, +// { +// ChartId: 1, +// ChartVersion: "4.18.1", +// EnvironmentId: 1, +// //PipelineConfigOverrideId: 5, +// //StartedOn: 2023-08-26T16:36:55.732551Z, +// //FinishedOn: 2023-08-26T16:40:00.174576Z, +// Status: "Succeeded", +// }, +// { +// ChartId: 1, +// ChartVersion: "4.18.1", +// EnvironmentId: 1, +// //PipelineConfigOverrideId: 5, +// //StartedOn: 2023-08-26T16:36:55.732551Z, +// //FinishedOn: 2023-08-26T16:40:00.174576Z, +// Status: "Succeeded", +// }, +// } +// +// deployedOnOtherEnvs := []*repository.DeploymentTemplateComparisonMetadata{ +// { +// ChartId: 1, +// ChartVersion: "4.18.1", +// 
EnvironmentId: 2, +// //PipelineConfigOverrideId: 9, +// }, +// } +// +// type args struct { +// appId int +// envId int +// } +// tests := []struct { +// name string +// args args +// want []*repository.DeploymentTemplateComparisonMetadata +// wantErr error +// }{ +// +// { +// name: "test for successfully fetching the list", +// args: args{ +// appId: 1, +// envId: 1, +// }, +// want: []*repository.DeploymentTemplateComparisonMetadata{ +// { +// ChartId: 1, +// ChartVersion: "v1.0.1", +// ChartType: "Deployment", +// EnvironmentId: 0, +// EnvironmentName: "", +// //PipelineConfigOverrideId: 0, +// StartedOn: nil, +// FinishedOn: nil, +// Status: "", +// Type: 1, +// }, +// { +// ChartId: 2, +// ChartVersion: "v1.0.2", +// ChartType: "Deployment", +// EnvironmentId: 0, +// EnvironmentName: "", +// //PipelineConfigOverrideId: 0, +// StartedOn: nil, +// FinishedOn: nil, +// Status: "", +// Type: 1, +// }, { +// ChartId: 3, +// ChartVersion: "v1.0.3", +// ChartType: "Deployment", +// EnvironmentId: 0, +// EnvironmentName: "", +// //PipelineConfigOverrideId: 0, +// StartedOn: nil, +// FinishedOn: nil, +// Status: "", +// Type: 1, +// }, { +// ChartId: 2, +// ChartVersion: "", +// ChartType: "", +// EnvironmentId: 1, +// EnvironmentName: "devtron-demo", +// //PipelineConfigOverrideId: 0, +// StartedOn: nil, +// FinishedOn: nil, +// Status: "", +// Type: 2, +// }, { +// ChartId: 1, +// ChartVersion: "4.18.1", +// ChartType: "", +// EnvironmentId: 1, +// EnvironmentName: "", +// //PipelineConfigOverrideId: 5, +// StartedOn: nil, +// FinishedOn: nil, +// Status: "Succeeded", +// Type: 3, +// }, { +// ChartId: 1, +// ChartVersion: "4.18.1", +// ChartType: "", +// EnvironmentId: 1, +// EnvironmentName: "", +// //PipelineConfigOverrideId: 5, +// StartedOn: nil, +// FinishedOn: nil, +// Status: "Succeeded", +// Type: 3, +// }, { +// ChartId: 1, +// ChartVersion: "4.18.1", +// ChartType: "", +// EnvironmentId: 1, +// EnvironmentName: "", +// //PipelineConfigOverrideId: 5, +// 
StartedOn: nil, +// FinishedOn: nil, +// Status: "Succeeded", +// Type: 3, +// }, { +// ChartId: 1, +// ChartVersion: "v1.0.1", +// ChartType: "Deployment", +// EnvironmentId: 2, +// EnvironmentName: "", +// //PipelineConfigOverrideId: 9, +// StartedOn: nil, +// FinishedOn: nil, +// Status: "", +// Type: 4, +// }, +// }, +// }, +// { +// name: "test for error in chart", +// args: args{ +// appId: 1, +// envId: 1, +// }, +// wantErr: errors.New("error in getting defaultVersions"), +// }, +// { +// name: "test for error in publishedOnEnvs", +// args: args{ +// appId: 1, +// envId: 1, +// }, +// wantErr: errors.New("error in getting publishedOnEnvs"), +// }, +// { +// name: "test for error in deployedOnEnv", +// args: args{ +// appId: 1, +// envId: 1, +// }, +// wantErr: errors.New("error in getting deployedOnEnv"), +// }, +// { +// name: "test for error in deployedOnOtherEnvs", +// args: args{ +// appId: 1, +// envId: 1, +// }, +// wantErr: errors.New("error in getting deployedOnOtherEnvs"), +// }, +// } +// for _, tt := range tests { +// t.Run(tt.name, func(t *testing.T) { +// impl, chartService, appListingService, deploymentTemplateRepository, _, _, _, _ := InitEventSimpleFactoryImpl(t) +// +// if tt.name == "test for successfully fetching the list" { +// chartService.On("ChartRefAutocompleteForAppOrEnv", tt.args.appId, 0).Return(defaultVersions, nil) +// appListingService.On("FetchMinDetailOtherEnvironment", tt.args.appId).Return(publishedOnEnvs, nil) +// deploymentTemplateRepository.On("FetchDeploymentHistoryWithChartRefs", tt.args.appId, tt.args.envId).Return(deployedOnEnv, nil) +// deploymentTemplateRepository.On("FetchLatestDeploymentWithChartRefs", tt.args.appId, tt.args.envId).Return(deployedOnOtherEnvs, nil) +// } +// +// if tt.name == "test for error in chart" { +// chartService.On("ChartRefAutocompleteForAppOrEnv", tt.args.appId, 0).Return(nil, errors.New("error in getting defaultVersions")) +// } +// +// if tt.name == "test for error in publishedOnEnvs" 
{ +// chartService.On("ChartRefAutocompleteForAppOrEnv", tt.args.appId, 0).Return(defaultVersions, nil) +// appListingService.On("FetchMinDetailOtherEnvironment", tt.args.appId).Return(nil, errors.New("error in getting publishedOnEnvs")) +// } +// +// if tt.name == "test for error in deployedOnEnv" { +// chartService.On("ChartRefAutocompleteForAppOrEnv", tt.args.appId, 0).Return(defaultVersions, nil) +// appListingService.On("FetchMinDetailOtherEnvironment", tt.args.appId).Return(publishedOnEnvs, nil) +// deploymentTemplateRepository.On("FetchDeploymentHistoryWithChartRefs", tt.args.appId, tt.args.envId).Return(nil, errors.New("error in getting deployedOnEnv")) +// } +// +// if tt.name == "test for error in deployedOnOtherEnvs" { +// chartService.On("ChartRefAutocompleteForAppOrEnv", tt.args.appId, 0).Return(defaultVersions, nil) +// appListingService.On("FetchMinDetailOtherEnvironment", tt.args.appId).Return(publishedOnEnvs, nil) +// deploymentTemplateRepository.On("FetchDeploymentHistoryWithChartRefs", tt.args.appId, tt.args.envId).Return(deployedOnEnv, nil) +// deploymentTemplateRepository.On("FetchLatestDeploymentWithChartRefs", tt.args.appId, tt.args.envId).Return(deployedOnOtherEnvs, errors.New("error in getting deployedOnOtherEnvs")) +// } +// +// got, err := impl.FetchDeploymentsWithChartRefs(tt.args.appId, tt.args.envId) +// +// assert.Equal(t, err, tt.wantErr) +// +// assert.Equal(t, len(got), len(tt.want)) +// }) +// } +//} +// +//func TestDeploymentTemplateServiceImpl_GetDeploymentTemplate(t *testing.T) { +// +// var myMap = make(map[string]interface{}) +// myString := 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" +// chart := &chartRepoRepository.Chart{} +// chart.GlobalOverride = 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" +// chart.Id = 1 +// type args struct { +// ctx context.Context +// request DeploymentTemplateRequest +// } +// tests := []struct { +// name string +// +// args args +// want DeploymentTemplateResponse +// wantErr error +// }{ +// { +// name: "get values same as that of request", +// args: args{ +// ctx: context.Background(), +// request: DeploymentTemplateRequest{ +// Values: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}", +// //ValuesAndManifestFlag: Values, +// }, +// }, +// want: DeploymentTemplateResponse{ +// Data: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}"}, +// }, +// { +// name: "get values for base charts", +// args: args{ +// ctx: context.Background(), +// request: DeploymentTemplateRequest{ +// Values: "", +// //ValuesAndManifestFlag: Values, +// Type: 1, +// ChartRefId: 1, +// }, +// }, +// want: DeploymentTemplateResponse{ +// Data: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}"}, +// }, +// { +// name: "get values for published on other envs", +// args: args{ +// ctx: context.Background(), +// request: DeploymentTemplateRequest{ +// Values: "", +// //ValuesAndManifestFlag: Values, +// Type: 2, +// ChartRefId: 1, +// AppId: 1, +// }, +// }, +// want: DeploymentTemplateResponse{ +// Data: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}"}, +// }, +// { +// name: "get error for published on other envs", +// args: args{ +// ctx: context.Background(), +// request: DeploymentTemplateRequest{ +// Values: "", +// //ValuesAndManifestFlag: Values, +// Type: 2, +// ChartRefId: 1, +// AppId: 1, +// }, +// }, +// wantErr: errors.New("error in getting chart"), +// want: DeploymentTemplateResponse{ +// Data: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}"}, +// }, +// { +// name: "get values for deployed on envs", +// args: args{ +// ctx: context.Background(), +// request: DeploymentTemplateRequest{ +// Values: "", +// //ValuesAndManifestFlag: Values, +// Type: 3, +// ChartRefId: 1, +// AppId: 1, +// //PipelineConfigOverrideId: 1, +// }, +// }, +// want: DeploymentTemplateResponse{ +// Data: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}"}, +// }, +// { +// name: "get error for deployed on envs", +// args: args{ +// ctx: context.Background(), +// request: DeploymentTemplateRequest{ +// Values: "", +// //ValuesAndManifestFlag: Values, +// Type: 3, +// ChartRefId: 1, +// AppId: 1, +// //PipelineConfigOverrideId: 1, +// }, +// }, +// wantErr: errors.New("error in getting values"), +// want: DeploymentTemplateResponse{ +// Data: 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6969,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}"}, +// }, +// } +// for _, tt := range tests { +// t.Run(tt.name, func(t *testing.T) { +// impl, chartService, _, deploymentTemplateRepository, chartRepository, _, _, _ := InitEventSimpleFactoryImpl(t) +// if tt.name == "get values for base charts" { +// chartService.On("GetAppOverrideForDefaultTemplate", tt.args.request.ChartRefId).Return(myMap, myString, nil) +// } +// if tt.name == "get values for published on other envs" { +// chartRepository.On("FindLatestChartForAppByAppId", tt.args.request.AppId).Return(chart, nil) +// } +// +// if tt.name == "get error for published on other envs" { +// chartRepository.On("FindLatestChartForAppByAppId", tt.args.request.AppId).Return(nil, errors.New("error in getting chart")) +// } +// +// if tt.name == "get values for deployed on envs" { +// deploymentTemplateRepository.On("FetchPipelineOverrideValues", tt.args.request.PipelineConfigOverrideId).Return(myString, nil) +// } +// +// if tt.name == "get error for deployed on envs" { +// 
deploymentTemplateRepository.On("FetchPipelineOverrideValues", tt.args.request.PipelineConfigOverrideId).Return(myString, errors.New("error in getting values")) +// } +// +// got, err := impl.GetDeploymentTemplate(tt.args.ctx, tt.args.request) +// assert.Equal(t, tt.wantErr, err) +// if err == nil { +// assert.Equal(t, got.Data, tt.want.Data) +// } +// }) +// } +//} +// +//func TestDeploymentTemplateServiceImpl_GetManifest(t *testing.T) { +// refChart := "refChart" +// template := "template" +// version := "version" +// myString := "myString" +// var chartBytes []byte +// +// t.Run("TestErrorInGettingRefChart", func(t *testing.T) { +// impl, chartService, _, _, _, _, _, _ := InitEventSimpleFactoryImpl(t) +// valuesYaml := "{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6961,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMappi
ng\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressIn
ternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizationPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnno
tations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secret\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" +// ctx := context.Background() +// request := chart.TemplateRequest{ChartRefId: 1} +// wantErr := errors.New("error in getting refChart") +// chartService.On("GetRefChart", request).Return(refChart, template, wantErr, version, myString) +// _, gotErr := impl.GetManifest(ctx, 1, valuesYaml) +// assert.Equal(t, gotErr, wantErr) +// }) +// +// t.Run("TestManifestGeneration_Success", func(t *testing.T) { +// impl, chartService, _, _, _, chartTemplateServiceImpl, helmAppService, helmAppClient := InitEventSimpleFactoryImpl(t) +// valuesYaml := 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6962,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" +// ctx := context.Background() +// request := chart.TemplateRequest{ChartRefId: 2} +// var config *client.ClusterConfig +// templateChartResponse := &client.TemplateChartResponse{ +// GeneratedManifest: "test generated manifest", +// } +// var zipPath string +// chartService.On("GetRefChart", request).Return(refChart, template, nil, version, myString) +// chartTemplateServiceImpl.On("LoadChartInBytes", "refChart", false, "", "").Return(chartBytes, zipPath, nil) +// helmAppService.On("GetClusterConf", 1).Return(config, nil) +// helmAppClient.On("TemplateChart", ctx, mock.AnythingOfType("*client.InstallReleaseRequest")).Return(templateChartResponse, nil) +// chartTemplateServiceImpl.On("CleanDir", zipPath) +// got, _ := impl.GetManifest(ctx, 2, valuesYaml) +// assert.Equal(t, *got.Manifest, templateChartResponse.GeneratedManifest) +// }) +// +// t.Run("TestErrorInGetClusterConf", func(t *testing.T) { +// impl, chartService, _, _, _, chartTemplateServiceImpl, helmAppService, _ := 
InitEventSimpleFactoryImpl(t) +// valuesYaml := "{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6963,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" +// ctx := context.Background() +// request := chart.TemplateRequest{ChartRefId: 5} +// wantErr1 := errors.New("error in fetching cluster detail") +// var zipPath string +// chartService.On("GetRefChart", request).Return("refChart5", "template5", nil, "version5", "myString5") +// chartTemplateServiceImpl.On("LoadChartInBytes", "refChart5", false, "", "").Return(chartBytes, zipPath, nil) +// helmAppService.On("GetClusterConf", 1).Return(nil, errors.New("error in fetching cluster detail")) +// chartTemplateServiceImpl.On("CleanDir", zipPath) +// _, gotErr1 := impl.GetManifest(ctx, 5, valuesYaml) +// assert.Equal(t, gotErr1, wantErr1) +// }) +// +// t.Run("TestErrorInTemplateChart", func(t *testing.T) { +// impl, chartService, _, _, _, chartTemplateServiceImpl, helmAppService, helmAppClient := InitEventSimpleFactoryImpl(t) +// valuesYaml := 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6963,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" +// ctx := context.Background() +// request := chart.TemplateRequest{ChartRefId: 5} +// var config *client.ClusterConfig +// wantErr1 := errors.New("error in templating chart") +// var zipPath string +// chartService.On("GetRefChart", request).Return("refChart5", "template5", nil, "version5", "myString5") +// chartTemplateServiceImpl.On("LoadChartInBytes", "refChart5", false, "", "").Return(chartBytes, zipPath, nil) +// helmAppService.On("GetClusterConf", 1).Return(config, nil) +// helmAppClient.On("TemplateChart", ctx, mock.AnythingOfType("*client.InstallReleaseRequest")).Return(nil, errors.New("error in templating chart")) +// chartTemplateServiceImpl.On("CleanDir", zipPath) +// _, gotErr1 := impl.GetManifest(ctx, 5, valuesYaml) +// assert.Equal(t, gotErr1, wantErr1) +// }) +// +// t.Run("TestErrorInLoadChartInBytes", func(t *testing.T) { +// impl, chartService, _, _, _, chartTemplateServiceImpl, _, _ := InitEventSimpleFactoryImpl(t) +// valuesYaml := 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6964,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizatio
nPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secr
et\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" +// ctx := context.Background() +// request := chart.TemplateRequest{ChartRefId: 3} +// wantErr := errors.New("error in getting chart") +// var zipPath string +// chartService.On("GetRefChart", request).Return("refChart1", "template1", nil, "version1", "myString1") +// chartTemplateServiceImpl.On("LoadChartInBytes", "refChart1", false, "", "").Return(chartBytes, zipPath, errors.New("error in getting chart")) +// _, err := impl.GetManifest(ctx, 3, valuesYaml) +// assert.Equal(t, err, wantErr) +// }) +//} +// +//func InitEventSimpleFactoryImpl(t *testing.T) (*DeploymentTemplateServiceImpl, *mocks.ChartService, *mocks2.AppListingService, *mocks3.DeploymentTemplateRepository, *mocks5.ChartRepository, *mocks6.ChartTemplateService, *mocks4.HelmAppService, *mocks4.HelmAppClient) { +// logger, _ := util.NewSugardLogger() +// chartService := mocks.NewChartService(t) +// appListingService := mocks2.NewAppListingService(t) +// appListingRepository := mocks3.NewAppListingRepository(t) +// 
deploymentTemplateRepository := mocks3.NewDeploymentTemplateRepository(t) +// helmAppService := mocks4.NewHelmAppService(t) +// chartRepository := mocks5.NewChartRepository(t) +// chartTemplateServiceImpl := mocks6.NewChartTemplateService(t) +// helmAppClient := mocks4.NewHelmAppClient(t) +// var k8sUtil *k8s.K8sUtil +// if K8sUtilObj != nil { +// k8sUtil = K8sUtilObj +// } else { +// config := &client2.RuntimeConfig{LocalDevMode: true} +// k8sUtil = k8s.NewK8sUtil(logger, config) +// K8sUtilObj = k8sUtil +// } +// impl := NewDeploymentTemplateServiceImpl(logger, chartService, appListingService, appListingRepository, deploymentTemplateRepository, helmAppService, chartRepository, chartTemplateServiceImpl, helmAppClient, k8sUtil) +// return impl, chartService, appListingService, deploymentTemplateRepository, chartRepository, chartTemplateServiceImpl, helmAppService, helmAppClient +//} diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 386010a1c3..b38017644b 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -26,6 +26,7 @@ import ( repository2 "github.com/devtron-labs/devtron/pkg/pipeline/repository" "github.com/devtron-labs/devtron/pkg/user" "github.com/go-pg/pg" + lo "github.com/samber/lo" "go.uber.org/zap" "sort" ) @@ -393,12 +394,10 @@ func (impl *AppArtifactManagerImpl) extractParentMetaDataByPipeline(pipeline *pi // retrieve parent details parentId, parentType, err = impl.cdPipelineConfigService.RetrieveParentDetails(pipeline.Id) if err != nil { - impl.logger.Errorw("failed to retrieve parent details", - "cdPipelineId", pipeline.Id, - "err", err) return parentId, parentType, parentCdId, err } + //TODO Gireesh: why this(stage != bean.CD_WORKFLOW_TYPE_POST) check is added, explain that in comment ?? 
if parentType == bean.CD_WORKFLOW_TYPE_POST || (parentType == bean.CD_WORKFLOW_TYPE_DEPLOY && stage != bean.CD_WORKFLOW_TYPE_POST) { // parentCdId is being set to store the artifact currently deployed on parent cd (if applicable). // Parent component is CD only if parent type is POST/DEPLOY @@ -460,7 +459,9 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi }) } - artifactIds := make([]int, 0, len(ciArtifacts)) + //TODO Gireesh: need to check this behaviour, can we use this instead of below loop ?? + artifactIds := lo.FlatMap(ciArtifacts, func(artifact bean2.CiArtifactBean, _ int) []int { return []int{artifact.Id} }) + //artifactIds := make([]int, 0, len(ciArtifacts)) for _, artifact := range ciArtifacts { artifactIds = append(artifactIds, artifact.Id) } @@ -477,6 +478,7 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi return ciArtifactsResponse, err } + //TODO Gireesh: Create a meaningful func for i, artifact := range ciArtifacts { if imageTaggingResp := imageTagsDataMap[ciArtifacts[i].Id]; imageTaggingResp != nil { ciArtifacts[i].ImageReleaseTags = imageTaggingResp @@ -507,7 +509,6 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi } } ciArtifacts[i].CiConfigureSourceType = ciWorkflow.GitTriggers[ciWorkflow.CiPipelineId].CiConfigureSourceType - ciArtifacts[i].CiConfigureSourceValue = ciWorkflow.GitTriggers[ciWorkflow.CiPipelineId].CiConfigureSourceValue } ciArtifactsResponse.CdPipelineId = pipeline.Id @@ -542,6 +543,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A listingFilterOpts.ExcludeArtifactIds = []int{currentRunningArtifact.Id} currentRunningArtifactId = currentRunningArtifact.Id currentRunningWorkflowStatus = latestWf[0].Status + // TODO Gireesh: move below logic to proper func belong to CiArtifactBean //current deployed artifact should always be computed, as we have to show it every time mInfo, err := 
parseMaterialInfo([]byte(currentRunningArtifact.MaterialInfo), currentRunningArtifact.DataSource) if err != nil { @@ -588,6 +590,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts impl.logger.Errorw("error in fetching cd workflow runners using filter", "filterOptions", listingFilterOpts, "err", err) return nil, err } + //TODO Gireesh: initialized array with size but are using append, not optimized solution ciArtifacts := make([]*bean2.CiArtifactBean, 0, len(cdWfrList)) //get artifact running on parent cd @@ -603,6 +606,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts } for _, wfr := range cdWfrList { + //TODO Gireesh: Refactoring needed mInfo, err := parseMaterialInfo([]byte(wfr.CdWorkflow.CiArtifact.MaterialInfo), wfr.CdWorkflow.CiArtifact.DataSource) if err != nil { mInfo = []byte("[]") @@ -637,6 +641,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpt return nil, err } + //TODO Gireesh: if initialized then no need of using append, put value directly to index ciArtifacts := make([]*bean2.CiArtifactBean, 0, len(artifacts)) for _, artifact := range artifacts { mInfo, err := parseMaterialInfo([]byte(artifact.MaterialInfo), artifact.DataSource) diff --git a/vendor/github.com/samber/lo/.gitignore b/vendor/github.com/samber/lo/.gitignore new file mode 100644 index 0000000000..e5ecc5c40a --- /dev/null +++ b/vendor/github.com/samber/lo/.gitignore @@ -0,0 +1,38 @@ + +# Created by https://www.toptal.com/developers/gitignore/api/go +# Edit at https://www.toptal.com/developers/gitignore?templates=go + +### Go ### +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test 
-c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +### Go Patch ### +/vendor/ +/Godeps/ + +# End of https://www.toptal.com/developers/gitignore/api/go + +cover.out +cover.html +.vscode + +.idea/ diff --git a/vendor/github.com/samber/lo/.travis.yml b/vendor/github.com/samber/lo/.travis.yml new file mode 100644 index 0000000000..f0de7f51c4 --- /dev/null +++ b/vendor/github.com/samber/lo/.travis.yml @@ -0,0 +1,7 @@ +language: go +before_install: + - go mod download + - make tools +go: + - "1.18" +script: make test diff --git a/vendor/github.com/samber/lo/CHANGELOG.md b/vendor/github.com/samber/lo/CHANGELOG.md new file mode 100644 index 0000000000..25815f76aa --- /dev/null +++ b/vendor/github.com/samber/lo/CHANGELOG.md @@ -0,0 +1,429 @@ +# Changelog + +@samber: I sometimes forget to update this file. Ping me on [Twitter](https://twitter.com/samuelberthe) or open an issue in case of error. We need to keep a clear changelog for easier lib upgrade. 
+ +## 1.38.1 (2023-03-20) + +Improvement: +- Async and AsyncX: now returns `<-chan T` instead of `chan T` + +## 1.38.0 (2023-03-20) + +Adding: +- lo.ValueOr +- lo.DebounceBy +- lo.EmptyableToPtr + +Improvement: +- Substring: add support for non-english chars + +Fix: +- Async: Fix goroutine leak + +## 1.37.0 (2022-12-15) + +Adding: +- lo.PartialX +- lo.Transaction + +Improvement: +- lo.Associate / lo.SliceToMap: faster memory allocation + +Chore: +- Remove *_test.go files from releases, in order to cleanup dev dependencies + +## 1.36.0 (2022-11-28) + +Adding: +- lo.AttemptWhile +- lo.AttemptWhileWithDelay + +## 1.35.0 (2022-11-15) + +Adding: +- lo.RandomString +- lo.BufferWithTimeout (alias to lo.BatchWithTimeout) +- lo.Buffer (alias to lo.Batch) + +Change: +- lo.Slice: avoid panic caused by out-of-bounds + +Deprecation: +- lo.BatchWithTimeout +- lo.Batch + +## 1.34.0 (2022-11-12) + +Improving: +- lo.Union: faster and can receive more than 2 lists + +Adding: +- lo.FanIn (alias to lo.ChannelMerge) +- lo.FanOut + +Deprecation: +- lo.ChannelMerge + +## 1.33.0 (2022-10-14) + +Adding: +- lo.ChannelMerge + +Improving: +- helpers with callbacks/predicates/iteratee now have named arguments, for easier autocompletion + +## 1.32.0 (2022-10-10) + +Adding: + +- lo.ChannelToSlice +- lo.CountValues +- lo.CountValuesBy +- lo.MapEntries +- lo.Sum +- lo.Interleave +- TupleX.Unpack() + +## 1.31.0 (2022-10-06) + +Adding: + +- lo.SliceToChannel +- lo.Generator +- lo.Batch +- lo.BatchWithTimeout + +## 1.30.1 (2022-10-06) + +Fix: + +- lo.Try1: remove generic type +- lo.Validate: format error properly + +## 1.30.0 (2022-10-04) + +Adding: + +- lo.TernaryF +- lo.Validate + +## 1.29.0 (2022-10-02) + +Adding: + +- lo.ErrorAs +- lo.TryOr +- lo.TryOrX + +## 1.28.0 (2022-09-05) + +Adding: + +- lo.ChannelDispatcher with 6 dispatching strategies: + - lo.DispatchingStrategyRoundRobin + - lo.DispatchingStrategyRandom + - lo.DispatchingStrategyWeightedRandom + - lo.DispatchingStrategyFirst + - 
lo.DispatchingStrategyLeast + - lo.DispatchingStrategyMost + +## 1.27.1 (2022-08-15) + +Bugfix: + +- Removed comparable constraint for lo.FindKeyBy + +## 1.27.0 (2022-07-29) + +Breaking: + +- Change of MapToSlice prototype: `MapToSlice[K comparable, V any, R any](in map[K]V, iteratee func(V, K) R) []R` -> `MapToSlice[K comparable, V any, R any](in map[K]V, iteratee func(K, V) R) []R` + +Added: + +- lo.ChunkString +- lo.SliceToMap (alias to lo.Associate) + +## 1.26.0 (2022-07-24) + +Adding: + +- lo.Associate +- lo.ReduceRight +- lo.FromPtrOr +- lo.MapToSlice +- lo.IsSorted +- lo.IsSortedByKey + +## 1.25.0 (2022-07-04) + +Adding: + +- lo.FindUniques +- lo.FindUniquesBy +- lo.FindDuplicates +- lo.FindDuplicatesBy +- lo.IsNotEmpty + +## 1.24.0 (2022-07-04) + +Adding: + +- lo.Without +- lo.WithoutEmpty + +## 1.23.0 (2022-07-04) + +Adding: + +- lo.FindKey +- lo.FindKeyBy + +## 1.22.0 (2022-07-04) + +Adding: + +- lo.Slice +- lo.FromPtr +- lo.IsEmpty +- lo.Compact +- lo.ToPairs: alias to lo.Entries +- lo.FromPairs: alias to lo.FromEntries +- lo.Partial + +Change: + +- lo.Must + lo.MustX: add context to panic message + +Fix: + +- lo.Nth: out of bound exception (#137) + +## 1.21.0 (2022-05-10) + +Adding: + +- lo.ToAnySlice +- lo.FromAnySlice + +## 1.20.0 (2022-05-02) + +Adding: + +- lo.Synchronize +- lo.SumBy + +Change: +- Removed generic type definition for lo.Try0: `lo.Try0[T]()` -> `lo.Try0()` + +## 1.19.0 (2022-04-30) + +Adding: + +- lo.RepeatBy +- lo.Subset +- lo.Replace +- lo.ReplaceAll +- lo.Substring +- lo.RuneLength + +## 1.18.0 (2022-04-28) + +Adding: + +- lo.SomeBy +- lo.EveryBy +- lo.None +- lo.NoneBy + +## 1.17.0 (2022-04-27) + +Adding: + +- lo.Unpack2 -> lo.Unpack3 +- lo.Async0 -> lo.Async6 + +## 1.16.0 (2022-04-26) + +Adding: + +- lo.AttemptWithDelay + +## 1.15.0 (2022-04-22) + +Improvement: + +- lo.Must: error or boolean value + +## 1.14.0 (2022-04-21) + +Adding: + +- lo.Coalesce + +## 1.13.0 (2022-04-14) + +Adding: + +- PickBy +- PickByKeys +- PickByValues 
+- OmitBy +- OmitByKeys +- OmitByValues +- Clamp +- MapKeys +- Invert +- IfF + ElseIfF + ElseF +- T0() + T1() + T2() + T3() + ... + +## 1.12.0 (2022-04-12) + +Adding: + +- Must +- Must{0-6} +- FindOrElse +- Async +- MinBy +- MaxBy +- Count +- CountBy +- FindIndexOf +- FindLastIndexOf +- FilterMap + +## 1.11.0 (2022-03-11) + +Adding: + +- Try +- Try{0-6} +- TryWitchValue +- TryCatch +- TryCatchWitchValue +- Debounce +- Reject + +## 1.10.0 (2022-03-11) + +Adding: + +- Range +- RangeFrom +- RangeWithSteps + +## 1.9.0 (2022-03-10) + +Added + +- Drop +- DropRight +- DropWhile +- DropRightWhile + +## 1.8.0 (2022-03-10) + +Adding Union. + +## 1.7.0 (2022-03-09) + +Adding ContainBy + +Adding MapValues + +Adding FlatMap + +## 1.6.0 (2022-03-07) + +Fixed PartitionBy. + +Adding Sample + +Adding Samples + +## 1.5.0 (2022-03-07) + +Adding Times + +Adding Attempt + +Adding Repeat + +## 1.4.0 (2022-03-07) + +- adding tuple types (2->9) +- adding Zip + Unzip +- adding lo.PartitionBy + lop.PartitionBy +- adding lop.GroupBy +- fixing Nth + +## 1.3.0 (2022-03-03) + +Last and Nth return errors + +## 1.2.0 (2022-03-03) + +Adding `lop.Map` and `lop.ForEach`. + +## 1.1.0 (2022-03-03) + +Adding `i int` param to `lo.Map()`, `lo.Filter()`, `lo.ForEach()` and `lo.Reduce()` predicates. 
+ +## 1.0.0 (2022-03-02) + +*Initial release* + +Supported helpers for slices: + +- Filter +- Map +- Reduce +- ForEach +- Uniq +- UniqBy +- GroupBy +- Chunk +- Flatten +- Shuffle +- Reverse +- Fill +- ToMap + +Supported helpers for maps: + +- Keys +- Values +- Entries +- FromEntries +- Assign (maps merge) + +Supported intersection helpers: + +- Contains +- Every +- Some +- Intersect +- Difference + +Supported search helpers: + +- IndexOf +- LastIndexOf +- Find +- Min +- Max +- Last +- Nth + +Other functional programming helpers: + +- Ternary (1 line if/else statement) +- If / ElseIf / Else +- Switch / Case / Default +- ToPtr +- ToSlicePtr + +Constraints: + +- Clonable diff --git a/vendor/github.com/samber/lo/Dockerfile b/vendor/github.com/samber/lo/Dockerfile new file mode 100644 index 0000000000..bd01bbbb45 --- /dev/null +++ b/vendor/github.com/samber/lo/Dockerfile @@ -0,0 +1,8 @@ + +FROM golang:1.18 + +WORKDIR /go/src/github.com/samber/lo + +COPY Makefile go.* ./ + +RUN make tools diff --git a/vendor/github.com/samber/lo/LICENSE b/vendor/github.com/samber/lo/LICENSE new file mode 100644 index 0000000000..c3dc72d9ab --- /dev/null +++ b/vendor/github.com/samber/lo/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Samuel Berthe + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/samber/lo/Makefile b/vendor/github.com/samber/lo/Makefile new file mode 100644 index 0000000000..57bb49159f --- /dev/null +++ b/vendor/github.com/samber/lo/Makefile @@ -0,0 +1,44 @@ + +BIN=go + +build: + ${BIN} build -v ./... + +test: + go test -race -v ./... +watch-test: + reflex -t 50ms -s -- sh -c 'gotest -race -v ./...' + +bench: + go test -benchmem -count 3 -bench ./... +watch-bench: + reflex -t 50ms -s -- sh -c 'go test -benchmem -count 3 -bench ./...' + +coverage: + ${BIN} test -v -coverprofile=cover.out -covermode=atomic . + ${BIN} tool cover -html=cover.out -o cover.html + +# tools +tools: + ${BIN} install github.com/cespare/reflex@latest + ${BIN} install github.com/rakyll/gotest@latest + ${BIN} install github.com/psampaz/go-mod-outdated@latest + ${BIN} install github.com/jondot/goweight@latest + ${BIN} install github.com/golangci/golangci-lint/cmd/golangci-lint@latest + ${BIN} get -t -u golang.org/x/tools/cmd/cover + ${BIN} install github.com/sonatype-nexus-community/nancy@latest + go mod tidy + +lint: + golangci-lint run --timeout 60s --max-same-issues 50 ./... +lint-fix: + golangci-lint run --timeout 60s --max-same-issues 50 --fix ./... 
+ +audit: tools + ${BIN} list -json -m all | nancy sleuth + +outdated: tools + ${BIN} list -u -m -json all | go-mod-outdated -update -direct + +weight: tools + goweight diff --git a/vendor/github.com/samber/lo/README.md b/vendor/github.com/samber/lo/README.md new file mode 100644 index 0000000000..77ab2d007d --- /dev/null +++ b/vendor/github.com/samber/lo/README.md @@ -0,0 +1,2933 @@ +# lo - Iterate over slices, maps, channels... + +[![tag](https://img.shields.io/github/tag/samber/lo.svg)](https://github.com/samber/lo/releases) +![Go Version](https://img.shields.io/badge/Go-%3E%3D%201.18-%23007d9c) +[![GoDoc](https://godoc.org/github.com/samber/lo?status.svg)](https://pkg.go.dev/github.com/samber/lo) +![Build Status](https://github.com/samber/lo/actions/workflows/test.yml/badge.svg) +[![Go report](https://goreportcard.com/badge/github.com/samber/lo)](https://goreportcard.com/report/github.com/samber/lo) +[![Coverage](https://img.shields.io/codecov/c/github/samber/lo)](https://codecov.io/gh/samber/lo) +[![Contributors](https://img.shields.io/github/contributors/samber/lo)](https://github.com/samber/lo/graphs/contributors) +[![License](https://img.shields.io/github/license/samber/lo)](./LICENSE) + +✨ **`samber/lo` is a Lodash-style Go library based on Go 1.18+ Generics.** + +This project started as an experiment with the new generics implementation. It may look like [Lodash](https://github.com/lodash/lodash) in some aspects. I used to code with the fantastic ["go-funk"](https://github.com/thoas/go-funk) package, but "go-funk" uses reflection and therefore is not typesafe. + +As expected, benchmarks demonstrate that generics are much faster than implementations based on the "reflect" package. Benchmarks also show similar performance gains compared to pure `for` loops. 
[See below](#-benchmark). + +In the future, 5 to 10 helpers will overlap with those coming into the Go standard library (under package names `slices` and `maps`). I feel this library is legitimate and offers many more valuable abstractions. + +**See also:** + +- [samber/do](https://github.com/samber/do): A dependency injection toolkit based on Go 1.18+ Generics +- [samber/mo](https://github.com/samber/mo): Monads based on Go 1.18+ Generics (Option, Result, Either...) + +**Why this name?** + +I wanted a **short name**, similar to "Lodash" and no Go package currently uses this name. + +![lo](img/logo-full.png) + +## 🚀 Install + +```sh +go get github.com/samber/lo@v1 +``` + +This library is v1 and follows SemVer strictly. + +No breaking changes will be made to exported APIs before v2.0.0. + +This library has no dependencies outside the Go standard library. + +## 💡 Usage + +You can import `lo` using: + +```go +import ( + "github.com/samber/lo" + lop "github.com/samber/lo/parallel" +) +``` + +Then use one of the helpers below: + +```go +names := lo.Uniq[string]([]string{"Samuel", "John", "Samuel"}) +// []string{"Samuel", "John"} +``` + +Most of the time, the compiler will be able to infer the type so that you can call: `lo.Uniq([]string{...})`. + +### Tips for lazy developers + +I cannot recommend it, but in case you are too lazy for repeating `lo.` everywhere, you can import the entire library into the namespace. + +```go +import ( + . "github.com/samber/lo" +) +``` + +I take no responsibility on this junk. 
😁 💩 + +## 🤠 Spec + +GoDoc: [https://godoc.org/github.com/samber/lo](https://godoc.org/github.com/samber/lo) + +Supported helpers for slices: + +- [Filter](#filter) +- [Map](#map) +- [FilterMap](#filtermap) +- [FlatMap](#flatmap) +- [Reduce](#reduce) +- [ReduceRight](#reduceright) +- [ForEach](#foreach) +- [Times](#times) +- [Uniq](#uniq) +- [UniqBy](#uniqby) +- [GroupBy](#groupby) +- [Chunk](#chunk) +- [PartitionBy](#partitionby) +- [Flatten](#flatten) +- [Interleave](#interleave) +- [Shuffle](#shuffle) +- [Reverse](#reverse) +- [Fill](#fill) +- [Repeat](#repeat) +- [RepeatBy](#repeatby) +- [KeyBy](#keyby) +- [Associate / SliceToMap](#associate-alias-slicetomap) +- [Drop](#drop) +- [DropRight](#dropright) +- [DropWhile](#dropwhile) +- [DropRightWhile](#droprightwhile) +- [Reject](#reject) +- [Count](#count) +- [CountBy](#countby) +- [CountValues](#countvalues) +- [CountValuesBy](#countvaluesby) +- [Subset](#subset) +- [Slice](#slice) +- [Replace](#replace) +- [ReplaceAll](#replaceall) +- [Compact](#compact) +- [IsSorted](#issorted) +- [IsSortedByKey](#issortedbykey) + +Supported helpers for maps: + +- [Keys](#keys) +- [ValueOr](#valueor) +- [Values](#values) +- [PickBy](#pickby) +- [PickByKeys](#pickbykeys) +- [PickByValues](#pickbyvalues) +- [OmitBy](#omitby) +- [OmitByKeys](#omitbykeys) +- [OmitByValues](#omitbyvalues) +- [Entries / ToPairs](#entries-alias-topairs) +- [FromEntries / FromPairs](#fromentries-alias-frompairs) +- [Invert](#invert) +- [Assign (merge of maps)](#assign) +- [MapKeys](#mapkeys) +- [MapValues](#mapvalues) +- [MapEntries](#mapentries) +- [MapToSlice](#maptoslice) + +Supported math helpers: + +- [Range / RangeFrom / RangeWithSteps](#range--rangefrom--rangewithsteps) +- [Clamp](#clamp) +- [Sum](#sum) +- [SumBy](#sumby) + +Supported helpers for strings: + +- [RandomString](#randomstring) +- [Substring](#substring) +- [ChunkString](#chunkstring) +- [RuneLength](#runelength) + +Supported helpers for tuples: + +- 
[T2 -> T9](#t2---t9) +- [Unpack2 -> Unpack9](#unpack2---unpack9) +- [Zip2 -> Zip9](#zip2---zip9) +- [Unzip2 -> Unzip9](#unzip2---unzip9) + +Supported helpers for channels: + +- [ChannelDispatcher](#channeldispatcher) +- [SliceToChannel](#slicetochannel) +- [Generator](#generator) +- [Buffer](#buffer) +- [BufferWithTimeout](#bufferwithtimeout) +- [FanIn](#fanin) +- [FanOut](#fanout) + +Supported intersection helpers: + +- [Contains](#contains) +- [ContainsBy](#containsby) +- [Every](#every) +- [EveryBy](#everyby) +- [Some](#some) +- [SomeBy](#someby) +- [None](#none) +- [NoneBy](#noneby) +- [Intersect](#intersect) +- [Difference](#difference) +- [Union](#union) +- [Without](#without) +- [WithoutEmpty](#withoutempty) + +Supported search helpers: + +- [IndexOf](#indexof) +- [LastIndexOf](#lastindexof) +- [Find](#find) +- [FindIndexOf](#findindexof) +- [FindLastIndexOf](#findlastindexof) +- [FindOrElse](#findorelse) +- [FindKey](#findkey) +- [FindKeyBy](#findkeyby) +- [FindUniques](#finduniques) +- [FindUniquesBy](#finduniquesby) +- [FindDuplicates](#findduplicates) +- [FindDuplicatesBy](#findduplicatesby) +- [Min](#min) +- [MinBy](#minby) +- [Max](#max) +- [MaxBy](#maxby) +- [Last](#last) +- [Nth](#nth) +- [Sample](#sample) +- [Samples](#samples) + +Conditional helpers: + +- [Ternary](#ternary) +- [TernaryF](#ternaryf) +- [If / ElseIf / Else](#if--elseif--else) +- [Switch / Case / Default](#switch--case--default) + +Type manipulation helpers: + +- [ToPtr](#toptr) +- [EmptyableToPtr](#emptyabletoptr) +- [FromPtr](#fromptr) +- [FromPtrOr](#fromptror) +- [ToSlicePtr](#tosliceptr) +- [ToAnySlice](#toanyslice) +- [FromAnySlice](#fromanyslice) +- [Empty](#empty) +- [IsEmpty](#isempty) +- [IsNotEmpty](#isnotempty) +- [Coalesce](#coalesce) + +Function helpers: + +- [Partial](#partial) +- [Partial2 -> Partial5](#partial2---partial5) + +Concurrency helpers: + +- [Attempt](#attempt) +- [AttemptWhile](#attemptwhile) +- [AttemptWithDelay](#attemptwithdelay) +- 
[AttemptWhileWithDelay](#attemptwhilewithdelay) +- [Debounce](#debounce) +- [DebounceBy](#debounceby) +- [Synchronize](#synchronize) +- [Async](#async) +- [Transaction](#transaction) + +Error handling: + +- [Validate](#validate) +- [Must](#must) +- [Try](#try) +- [Try1 -> Try6](#try0-6) +- [TryOr](#tryor) +- [TryOr1 -> TryOr6](#tryor0-6) +- [TryCatch](#trycatch) +- [TryWithErrorValue](#trywitherrorvalue) +- [TryCatchWithErrorValue](#trycatchwitherrorvalue) +- [ErrorsAs](#errorsas) + +Constraints: + +- Clonable + +### Filter + +Iterates over a collection and returns an array of all the elements the predicate function returns `true` for. + +```go +even := lo.Filter([]int{1, 2, 3, 4}, func(x int, index int) bool { + return x%2 == 0 +}) +// []int{2, 4} +``` + +[[play](https://go.dev/play/p/Apjg3WeSi7K)] + +### Map + +Manipulates a slice of one type and transforms it into a slice of another type: + +```go +import "github.com/samber/lo" + +lo.Map([]int64{1, 2, 3, 4}, func(x int64, index int) string { + return strconv.FormatInt(x, 10) +}) +// []string{"1", "2", "3", "4"} +``` + +[[play](https://go.dev/play/p/OkPcYAhBo0D)] + +Parallel processing: like `lo.Map()`, but the mapper function is called in a goroutine. Results are returned in the same order. + +```go +import lop "github.com/samber/lo/parallel" + +lop.Map([]int64{1, 2, 3, 4}, func(x int64, _ int) string { + return strconv.FormatInt(x, 10) +}) +// []string{"1", "2", "3", "4"} +``` + +### FilterMap + +Returns a slice which obtained after both filtering and mapping using the given callback function. + +The callback function should return two values: the result of the mapping operation and whether the result element should be included or not. 
+ +```go +matching := lo.FilterMap([]string{"cpu", "gpu", "mouse", "keyboard"}, func(x string, _ int) (string, bool) { + if strings.HasSuffix(x, "pu") { + return "xpu", true + } + return "", false +}) +// []string{"xpu", "xpu"} +``` + +[[play](https://go.dev/play/p/-AuYXfy7opz)] + +### FlatMap + +Manipulates a slice and transforms and flattens it to a slice of another type. The transform function can either return a slice or a `nil`, and in the `nil` case no value is added to the final slice. + +```go +lo.FlatMap([]int{0, 1, 2}, func(x int, _ int) []string { + return []string{ + strconv.FormatInt(x, 10), + strconv.FormatInt(x, 10), + } +}) +// []string{"0", "0", "1", "1", "2", "2"} +``` + +[[play](https://go.dev/play/p/YSoYmQTA8-U)] + +### Reduce + +Reduces a collection to a single value. The value is calculated by accumulating the result of running each element in the collection through an accumulator function. Each successive invocation is supplied with the return value returned by the previous call. + +```go +sum := lo.Reduce([]int{1, 2, 3, 4}, func(agg int, item int, _ int) int { + return agg + item +}, 0) +// 10 +``` + +[[play](https://go.dev/play/p/R4UHXZNaaUG)] + +### ReduceRight + +Like `lo.Reduce` except that it iterates over elements of collection from right to left. + +```go +result := lo.ReduceRight([][]int{{0, 1}, {2, 3}, {4, 5}}, func(agg []int, item []int, _ int) []int { + return append(agg, item...) +}, []int{}) +// []int{4, 5, 2, 3, 0, 1} +``` + +[[play](https://go.dev/play/p/Fq3W70l7wXF)] + +### ForEach + +Iterates over elements of a collection and invokes the function over each element. + +```go +import "github.com/samber/lo" + +lo.ForEach([]string{"hello", "world"}, func(x string, _ int) { + println(x) +}) +// prints "hello\nworld\n" +``` + +[[play](https://go.dev/play/p/oofyiUPRf8t)] + +Parallel processing: like `lo.ForEach()`, but the callback is called as a goroutine. 
+ +```go +import lop "github.com/samber/lo/parallel" + +lop.ForEach([]string{"hello", "world"}, func(x string, _ int) { + println(x) +}) +// prints "hello\nworld\n" or "world\nhello\n" +``` + +### Times + +Times invokes the iteratee n times, returning an array of the results of each invocation. The iteratee is invoked with index as argument. + +```go +import "github.com/samber/lo" + +lo.Times(3, func(i int) string { + return strconv.FormatInt(int64(i), 10) +}) +// []string{"0", "1", "2"} +``` + +[[play](https://go.dev/play/p/vgQj3Glr6lT)] + +Parallel processing: like `lo.Times()`, but callback is called in goroutine. + +```go +import lop "github.com/samber/lo/parallel" + +lop.Times(3, func(i int) string { + return strconv.FormatInt(int64(i), 10) +}) +// []string{"0", "1", "2"} +``` + +### Uniq + +Returns a duplicate-free version of an array, in which only the first occurrence of each element is kept. The order of result values is determined by the order they occur in the array. + +```go +uniqValues := lo.Uniq([]int{1, 2, 2, 1}) +// []int{1, 2} +``` + +[[play](https://go.dev/play/p/DTzbeXZ6iEN)] + +### UniqBy + +Returns a duplicate-free version of an array, in which only the first occurrence of each element is kept. The order of result values is determined by the order they occur in the array. It accepts `iteratee` which is invoked for each element in array to generate the criterion by which uniqueness is computed. + +```go +uniqValues := lo.UniqBy([]int{0, 1, 2, 3, 4, 5}, func(i int) int { + return i%3 +}) +// []int{0, 1, 2} +``` + +[[play](https://go.dev/play/p/g42Z3QSb53u)] + +### GroupBy + +Returns an object composed of keys generated from the results of running each element of collection through iteratee. 
+ +```go +import lo "github.com/samber/lo" + +groups := lo.GroupBy([]int{0, 1, 2, 3, 4, 5}, func(i int) int { + return i%3 +}) +// map[int][]int{0: []int{0, 3}, 1: []int{1, 4}, 2: []int{2, 5}} +``` + +[[play](https://go.dev/play/p/XnQBd_v6brd)] + +Parallel processing: like `lo.GroupBy()`, but callback is called in goroutine. + +```go +import lop "github.com/samber/lo/parallel" + +lop.GroupBy([]int{0, 1, 2, 3, 4, 5}, func(i int) int { + return i%3 +}) +// map[int][]int{0: []int{0, 3}, 1: []int{1, 4}, 2: []int{2, 5}} +``` + +### Chunk + +Returns an array of elements split into groups the length of size. If array can't be split evenly, the final chunk will be the remaining elements. + +```go +lo.Chunk([]int{0, 1, 2, 3, 4, 5}, 2) +// [][]int{{0, 1}, {2, 3}, {4, 5}} + +lo.Chunk([]int{0, 1, 2, 3, 4, 5, 6}, 2) +// [][]int{{0, 1}, {2, 3}, {4, 5}, {6}} + +lo.Chunk([]int{}, 2) +// [][]int{} + +lo.Chunk([]int{0}, 2) +// [][]int{{0}} +``` + +[[play](https://go.dev/play/p/EeKl0AuTehH)] + +### PartitionBy + +Returns an array of elements split into groups. The order of grouped values is determined by the order they occur in collection. The grouping is generated from the results of running each element of collection through iteratee. + +```go +import lo "github.com/samber/lo" + +partitions := lo.PartitionBy([]int{-2, -1, 0, 1, 2, 3, 4, 5}, func(x int) string { + if x < 0 { + return "negative" + } else if x%2 == 0 { + return "even" + } + return "odd" +}) +// [][]int{{-2, -1}, {0, 2, 4}, {1, 3, 5}} +``` + +[[play](https://go.dev/play/p/NfQ_nGjkgXW)] + +Parallel processing: like `lo.PartitionBy()`, but callback is called in goroutine. Results are returned in the same order. 
+ +```go +import lop "github.com/samber/lo/parallel" + +partitions := lop.PartitionBy([]int{-2, -1, 0, 1, 2, 3, 4, 5}, func(x int) string { + if x < 0 { + return "negative" + } else if x%2 == 0 { + return "even" + } + return "odd" +}) +// [][]int{{-2, -1}, {0, 2, 4}, {1, 3, 5}} +``` + +### Flatten + +Returns an array a single level deep. + +```go +flat := lo.Flatten([][]int{{0, 1}, {2, 3, 4, 5}}) +// []int{0, 1, 2, 3, 4, 5} +``` + +[[play](https://go.dev/play/p/rbp9ORaMpjw)] + +### Interleave + +Round-robin alternating input slices and sequentially appending value at index into result. + +```go +interleaved := lo.Interleave([]int{1, 4, 7}, []int{2, 5, 8}, []int{3, 6, 9}) +// []int{1, 2, 3, 4, 5, 6, 7, 8, 9} + +interleaved := lo.Interleave([]int{1}, []int{2, 5, 8}, []int{3, 6}, []int{4, 7, 9, 10}) +// []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} +``` + +[[play](https://go.dev/play/p/DDhlwrShbwe)] + +### Shuffle + +Returns an array of shuffled values. Uses the Fisher-Yates shuffle algorithm. + +```go +randomOrder := lo.Shuffle([]int{0, 1, 2, 3, 4, 5}) +// []int{1, 4, 0, 3, 5, 2} +``` + +[[play](https://go.dev/play/p/Qp73bnTDnc7)] + +### Reverse + +Reverses array so that the first element becomes the last, the second element becomes the second to last, and so on. + +⚠️ This helper is **mutable**. This behavior might change in `v2.0.0`. See [#160](https://github.com/samber/lo/issues/160). + +```go +reverseOrder := lo.Reverse([]int{0, 1, 2, 3, 4, 5}) +// []int{5, 4, 3, 2, 1, 0} +``` + +[[play](https://go.dev/play/p/fhUMLvZ7vS6)] + +### Fill + +Fills elements of array with `initial` value. + +```go +type foo struct { + bar string +} + +func (f foo) Clone() foo { + return foo{f.bar} +} + +initializedSlice := lo.Fill([]foo{foo{"a"}, foo{"a"}}, foo{"b"}) +// []foo{foo{"b"}, foo{"b"}} +``` + +[[play](https://go.dev/play/p/VwR34GzqEub)] + +### Repeat + +Builds a slice with N copies of initial value. 
+ +```go +type foo struct { + bar string +} + +func (f foo) Clone() foo { + return foo{f.bar} +} + +slice := lo.Repeat(2, foo{"a"}) +// []foo{foo{"a"}, foo{"a"}} +``` + +[[play](https://go.dev/play/p/g3uHXbmc3b6)] + +### RepeatBy + +Builds a slice with values returned by N calls of callback. + +```go +slice := lo.RepeatBy(0, func (i int) string { + return strconv.FormatInt(int64(math.Pow(float64(i), 2)), 10) +}) +// []string{} + +slice := lo.RepeatBy(5, func(i int) string { + return strconv.FormatInt(int64(math.Pow(float64(i), 2)), 10) +}) +// []string{"0", "1", "4", "9", "16"} +``` + +[[play](https://go.dev/play/p/ozZLCtX_hNU)] + +### KeyBy + +Transforms a slice or an array of structs to a map based on a pivot callback. + +```go +m := lo.KeyBy([]string{"a", "aa", "aaa"}, func(str string) int { + return len(str) +}) +// map[int]string{1: "a", 2: "aa", 3: "aaa"} + +type Character struct { + dir string + code int +} +characters := []Character{ + {dir: "left", code: 97}, + {dir: "right", code: 100}, +} +result := lo.KeyBy(characters, func(char Character) string { + return string(rune(char.code)) +}) +//map[a:{dir:left code:97} d:{dir:right code:100}] +``` + +[[play](https://go.dev/play/p/mdaClUAT-zZ)] + +### Associate (alias: SliceToMap) + +Returns a map containing key-value pairs provided by transform function applied to elements of the given slice. +If any of two pairs would have the same key the last one gets added to the map. + +The order of keys in returned map is not specified and is not guaranteed to be the same from the original array. + +```go +in := []*foo{{baz: "apple", bar: 1}, {baz: "banana", bar: 2}} + +aMap := lo.Associate(in, func (f *foo) (string, int) { + return f.baz, f.bar +}) +// map[string][int]{ "apple":1, "banana":2 } +``` + +[[play](https://go.dev/play/p/WHa2CfMO3Lr)] + +### Drop + +Drops n elements from the beginning of a slice or array. 
+ +```go +l := lo.Drop([]int{0, 1, 2, 3, 4, 5}, 2) +// []int{2, 3, 4, 5} +``` + +[[play](https://go.dev/play/p/JswS7vXRJP2)] + +### DropRight + +Drops n elements from the end of a slice or array. + +```go +l := lo.DropRight([]int{0, 1, 2, 3, 4, 5}, 2) +// []int{0, 1, 2, 3} +``` + +[[play](https://go.dev/play/p/GG0nXkSJJa3)] + +### DropWhile + +Drop elements from the beginning of a slice or array while the predicate returns true. + +```go +l := lo.DropWhile([]string{"a", "aa", "aaa", "aa", "aa"}, func(val string) bool { + return len(val) <= 2 +}) +// []string{"aaa", "aa", "aa"} +``` + +[[play](https://go.dev/play/p/7gBPYw2IK16)] + +### DropRightWhile + +Drop elements from the end of a slice or array while the predicate returns true. + +```go +l := lo.DropRightWhile([]string{"a", "aa", "aaa", "aa", "aa"}, func(val string) bool { + return len(val) <= 2 +}) +// []string{"a", "aa", "aaa"} +``` + +[[play](https://go.dev/play/p/3-n71oEC0Hz)] + +### Reject + +The opposite of Filter, this method returns the elements of collection that predicate does not return truthy for. + +```go +odd := lo.Reject([]int{1, 2, 3, 4}, func(x int, _ int) bool { + return x%2 == 0 +}) +// []int{1, 3} +``` + +[[play](https://go.dev/play/p/YkLMODy1WEL)] + +### Count + +Counts the number of elements in the collection that compare equal to value. + +```go +count := lo.Count([]int{1, 5, 1}, 1) +// 2 +``` + +[[play](https://go.dev/play/p/Y3FlK54yveC)] + +### CountBy + +Counts the number of elements in the collection for which predicate is true. + +```go +count := lo.CountBy([]int{1, 5, 1}, func(i int) bool { + return i < 4 +}) +// 2 +``` + +[[play](https://go.dev/play/p/ByQbNYQQi4X)] + +### CountValues + +Counts the number of each element in the collection. 
+
+```go
+lo.CountValues([]int{})
+// map[int]int{}
+
+lo.CountValues([]int{1, 2})
+// map[int]int{1: 1, 2: 1}
+
+lo.CountValues([]int{1, 2, 2})
+// map[int]int{1: 1, 2: 2}
+
+lo.CountValues([]string{"foo", "bar", ""})
+// map[string]int{"": 1, "foo": 1, "bar": 1}
+
+lo.CountValues([]string{"foo", "bar", "bar"})
+// map[string]int{"foo": 1, "bar": 2}
+```
+
+[[play](https://go.dev/play/p/-p-PyLT4dfy)]
+
+### CountValuesBy
+
+Counts the number of each element in the collection. It is equivalent to chaining lo.Map and lo.CountValues.
+
+```go
+isEven := func(v int) bool {
+    return v%2==0
+}
+
+lo.CountValuesBy([]int{}, isEven)
+// map[bool]int{}
+
+lo.CountValuesBy([]int{1, 2}, isEven)
+// map[bool]int{false: 1, true: 1}
+
+lo.CountValuesBy([]int{1, 2, 2}, isEven)
+// map[bool]int{false: 1, true: 2}
+
+length := func(v string) int {
+    return len(v)
+}
+
+lo.CountValuesBy([]string{"foo", "bar", ""}, length)
+// map[int]int{0: 1, 3: 2}
+
+lo.CountValuesBy([]string{"foo", "bar", "bar"}, length)
+// map[int]int{3: 3}
+```
+
+[[play](https://go.dev/play/p/2U0dG1SnOmS)]
+
+### Subset
+
+Returns a copy of a slice from `offset` up to `length` elements. Like `slice[start:start+length]`, but does not panic on overflow.
+
+```go
+in := []int{0, 1, 2, 3, 4}
+
+sub := lo.Subset(in, 2, 3)
+// []int{2, 3, 4}
+
+sub := lo.Subset(in, -4, 3)
+// []int{1, 2, 3}
+
+sub := lo.Subset(in, -2, math.MaxUint)
+// []int{3, 4}
+```
+
+[[play](https://go.dev/play/p/tOQu1GhFcog)]
+
+### Slice
+
+Returns a copy of a slice from `start` up to, but not including `end`. Like `slice[start:end]`, but does not panic on overflow.
+ +```go +in := []int{0, 1, 2, 3, 4} + +slice := lo.Slice(in, 0, 5) +// []int{0, 1, 2, 3, 4} + +slice := lo.Slice(in, 2, 3) +// []int{2} + +slice := lo.Slice(in, 2, 6) +// []int{2, 3, 4} + +slice := lo.Slice(in, 4, 3) +// []int{} +``` + +[[play](https://go.dev/play/p/8XWYhfMMA1h)] + +### Replace + +Returns a copy of the slice with the first n non-overlapping instances of old replaced by new. + +```go +in := []int{0, 1, 0, 1, 2, 3, 0} + +slice := lo.Replace(in, 0, 42, 1) +// []int{42, 1, 0, 1, 2, 3, 0} + +slice := lo.Replace(in, -1, 42, 1) +// []int{0, 1, 0, 1, 2, 3, 0} + +slice := lo.Replace(in, 0, 42, 2) +// []int{42, 1, 42, 1, 2, 3, 0} + +slice := lo.Replace(in, 0, 42, -1) +// []int{42, 1, 42, 1, 2, 3, 42} +``` + +[[play](https://go.dev/play/p/XfPzmf9gql6)] + +### ReplaceAll + +Returns a copy of the slice with all non-overlapping instances of old replaced by new. + +```go +in := []int{0, 1, 0, 1, 2, 3, 0} + +slice := lo.ReplaceAll(in, 0, 42) +// []int{42, 1, 42, 1, 2, 3, 42} + +slice := lo.ReplaceAll(in, -1, 42) +// []int{0, 1, 0, 1, 2, 3, 0} +``` + +[[play](https://go.dev/play/p/a9xZFUHfYcV)] + +### Compact + +Returns a slice of all non-zero elements. + +```go +in := []string{"", "foo", "", "bar", ""} + +slice := lo.Compact[string](in) +// []string{"foo", "bar"} +``` + +[[play](https://go.dev/play/p/tXiy-iK6PAc)] + +### IsSorted + +Checks if a slice is sorted. + +```go +slice := lo.IsSorted([]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) +// true +``` + +[[play](https://go.dev/play/p/mc3qR-t4mcx)] + +### IsSortedByKey + +Checks if a slice is sorted by iteratee. + +```go +slice := lo.IsSortedByKey([]string{"a", "bb", "ccc"}, func(s string) int { + return len(s) +}) +// true +``` + +[[play](https://go.dev/play/p/wiG6XyBBu49)] + +### Keys + +Creates an array of the map keys. 
+
+```go
+keys := lo.Keys[string, int](map[string]int{"foo": 1, "bar": 2})
+// []string{"foo", "bar"}
+```
+
+[[play](https://go.dev/play/p/Uu11fHASqrU)]
+
+### Values
+
+Creates an array of the map values.
+
+```go
+values := lo.Values[string, int](map[string]int{"foo": 1, "bar": 2})
+// []int{1, 2}
+```
+
+[[play](https://go.dev/play/p/nnRTQkzQfF6)]
+
+### ValueOr
+
+Returns the value of the given key or the fallback value if the key is not present.
+
+```go
+value := lo.ValueOr[string, int](map[string]int{"foo": 1, "bar": 2}, "foo", 42)
+// 1
+
+value := lo.ValueOr[string, int](map[string]int{"foo": 1, "bar": 2}, "baz", 42)
+// 42
+```
+
+[[play](https://go.dev/play/p/bAq9mHErB4V)]
+
+### PickBy
+
+Returns same map type filtered by given predicate.
+
+```go
+m := lo.PickBy(map[string]int{"foo": 1, "bar": 2, "baz": 3}, func(key string, value int) bool {
+    return value%2 == 1
+})
+// map[string]int{"foo": 1, "baz": 3}
+```
+
+[[play](https://go.dev/play/p/kdg8GR_QMmf)]
+
+### PickByKeys
+
+Returns same map type filtered by given keys.
+
+```go
+m := lo.PickByKeys(map[string]int{"foo": 1, "bar": 2, "baz": 3}, []string{"foo", "baz"})
+// map[string]int{"foo": 1, "baz": 3}
+```
+
+[[play](https://go.dev/play/p/R1imbuci9qU)]
+
+### PickByValues
+
+Returns same map type filtered by given values.
+
+```go
+m := lo.PickByValues(map[string]int{"foo": 1, "bar": 2, "baz": 3}, []int{1, 3})
+// map[string]int{"foo": 1, "baz": 3}
+```
+
+[[play](https://go.dev/play/p/1zdzSvbfsJc)]
+
+### OmitBy
+
+Returns same map type filtered by given predicate.
+
+```go
+m := lo.OmitBy(map[string]int{"foo": 1, "bar": 2, "baz": 3}, func(key string, value int) bool {
+    return value%2 == 1
+})
+// map[string]int{"bar": 2}
+```
+
+[[play](https://go.dev/play/p/EtBsR43bdsd)]
+
+### OmitByKeys
+
+Returns same map type filtered by given keys.
+ +```go +m := lo.OmitByKeys(map[string]int{"foo": 1, "bar": 2, "baz": 3}, []string{"foo", "baz"}) +// map[string]int{"bar": 2} +``` + +[[play](https://go.dev/play/p/t1QjCrs-ysk)] + +### OmitByValues + +Returns same map type filtered by given values. + +```go +m := lo.OmitByValues(map[string]int{"foo": 1, "bar": 2, "baz": 3}, []int{1, 3}) +// map[string]int{"bar": 2} +``` + +[[play](https://go.dev/play/p/9UYZi-hrs8j)] + +### Entries (alias: ToPairs) + +Transforms a map into array of key/value pairs. + +```go +entries := lo.Entries(map[string]int{"foo": 1, "bar": 2}) +// []lo.Entry[string, int]{ +// { +// Key: "foo", +// Value: 1, +// }, +// { +// Key: "bar", +// Value: 2, +// }, +// } +``` + +[[play](https://go.dev/play/p/3Dhgx46gawJ)] + +### FromEntries (alias: FromPairs) + +Transforms an array of key/value pairs into a map. + +```go +m := lo.FromEntries([]lo.Entry[string, int]{ + { + Key: "foo", + Value: 1, + }, + { + Key: "bar", + Value: 2, + }, +}) +// map[string]int{"foo": 1, "bar": 2} +``` + +[[play](https://go.dev/play/p/oIr5KHFGCEN)] + +### Invert + +Creates a map composed of the inverted keys and values. If map contains duplicate values, subsequent values overwrite property assignments of previous values. + +```go +m1 := lo.Invert(map[string]int{"a": 1, "b": 2}) +// map[int]string{1: "a", 2: "b"} + +m2 := lo.Invert(map[string]int{"a": 1, "b": 2, "c": 1}) +// map[int]string{1: "c", 2: "b"} +``` + +[[play](https://go.dev/play/p/rFQ4rak6iA1)] + +### Assign + +Merges multiple maps from left to right. + +```go +mergedMaps := lo.Assign[string, int]( + map[string]int{"a": 1, "b": 2}, + map[string]int{"b": 3, "c": 4}, +) +// map[string]int{"a": 1, "b": 3, "c": 4} +``` + +[[play](https://go.dev/play/p/VhwfJOyxf5o)] + +### MapKeys + +Manipulates a map keys and transforms it to a map of another type. 
+ +```go +m2 := lo.MapKeys(map[int]int{1: 1, 2: 2, 3: 3, 4: 4}, func(_ int, v int) string { + return strconv.FormatInt(int64(v), 10) +}) +// map[string]int{"1": 1, "2": 2, "3": 3, "4": 4} +``` + +[[play](https://go.dev/play/p/9_4WPIqOetJ)] + +### MapValues + +Manipulates a map values and transforms it to a map of another type. + +```go +m1 := map[int]int64{1: 1, 2: 2, 3: 3} + +m2 := lo.MapValues(m1, func(x int64, _ int) string { + return strconv.FormatInt(x, 10) +}) +// map[int]string{1: "1", 2: "2", 3: "3"} +``` + +[[play](https://go.dev/play/p/T_8xAfvcf0W)] + +### MapEntries + +Manipulates a map entries and transforms it to a map of another type. + +```go +in := map[string]int{"foo": 1, "bar": 2} + +out := lo.MapEntries(in, func(k string, v int) (int, string) { + return v,k +}) +// map[int]string{1: "foo", 2: "bar"} +``` + +[[play](https://go.dev/play/p/VuvNQzxKimT)] + +### MapToSlice + +Transforms a map into a slice based on specific iteratee. + +```go +m := map[int]int64{1: 4, 2: 5, 3: 6} + +s := lo.MapToSlice(m, func(k int, v int64) string { + return fmt.Sprintf("%d_%d", k, v) +}) +// []string{"1_4", "2_5", "3_6"} +``` + +[[play](https://go.dev/play/p/ZuiCZpDt6LD)] + +### Range / RangeFrom / RangeWithSteps + +Creates an array of numbers (positive and/or negative) progressing from start up to, but not including end. + +```go +result := lo.Range(4) +// [0, 1, 2, 3] + +result := lo.Range(-4) +// [0, -1, -2, -3] + +result := lo.RangeFrom(1, 5) +// [1, 2, 3, 4, 5] + +result := lo.RangeFrom[float64](1.0, 5) +// [1.0, 2.0, 3.0, 4.0, 5.0] + +result := lo.RangeWithSteps(0, 20, 5) +// [0, 5, 10, 15] + +result := lo.RangeWithSteps[float32](-1.0, -4.0, -1.0) +// [-1.0, -2.0, -3.0] + +result := lo.RangeWithSteps(1, 4, -1) +// [] + +result := lo.Range(0) +// [] +``` + +[[play](https://go.dev/play/p/0r6VimXAi9H)] + +### Clamp + +Clamps number within the inclusive lower and upper bounds. 
+ +```go +r1 := lo.Clamp(0, -10, 10) +// 0 + +r2 := lo.Clamp(-42, -10, 10) +// -10 + +r3 := lo.Clamp(42, -10, 10) +// 10 +``` + +[[play](https://go.dev/play/p/RU4lJNC2hlI)] + +### Sum + +Sums the values in a collection. + +If collection is empty 0 is returned. + +```go +list := []int{1, 2, 3, 4, 5} +sum := lo.Sum(list) +// 15 +``` + +[[play](https://go.dev/play/p/upfeJVqs4Bt)] + +### SumBy + +Summarizes the values in a collection using the given return value from the iteration function. + +If collection is empty 0 is returned. + +```go +strings := []string{"foo", "bar"} +sum := lo.SumBy(strings, func(item string) int { + return len(item) +}) +// 6 +``` + +[[play](https://go.dev/play/p/Dz_a_7jN_ca)] + +### RandomString + +Returns a random string of the specified length and made of the specified charset. + +```go +str := lo.RandomString(5, lo.LettersCharset) +// example: "eIGbt" +``` + +[[play](https://go.dev/play/p/rRseOQVVum4)] + +### Substring + +Return part of a string. + +```go +sub := lo.Substring("hello", 2, 3) +// "llo" + +sub := lo.Substring("hello", -4, 3) +// "ell" + +sub := lo.Substring("hello", -2, math.MaxUint) +// "lo" +``` + +[[play](https://go.dev/play/p/TQlxQi82Lu1)] + +### ChunkString + +Returns an array of strings split into groups the length of size. If array can't be split evenly, the final chunk will be the remaining elements. + +```go +lo.ChunkString("123456", 2) +// []string{"12", "34", "56"} + +lo.ChunkString("1234567", 2) +// []string{"12", "34", "56", "7"} + +lo.ChunkString("", 2) +// []string{""} + +lo.ChunkString("1", 2) +// []string{"1"} +``` + +[[play](https://go.dev/play/p/__FLTuJVz54)] + +### RuneLength + +An alias to utf8.RuneCountInString which returns the number of runes in string. + +```go +sub := lo.RuneLength("hellô") +// 5 + +sub := len("hellô") +// 6 +``` + +[[play](https://go.dev/play/p/tuhgW_lWY8l)] + +### T2 -> T9 + +Creates a tuple from a list of values. 
+ +```go +tuple1 := lo.T2("x", 1) +// Tuple2[string, int]{A: "x", B: 1} + +func example() (string, int) { return "y", 2 } +tuple2 := lo.T2(example()) +// Tuple2[string, int]{A: "y", B: 2} +``` + +[[play](https://go.dev/play/p/IllL3ZO4BQm)] + +### Unpack2 -> Unpack9 + +Returns values contained in tuple. + +```go +r1, r2 := lo.Unpack2(lo.Tuple2[string, int]{"a", 1}) +// "a", 1 +``` + +Unpack is also available as a method of TupleX. + +```go +tuple2 := lo.T2("a", 1) +a, b := tuple2.Unpack() +// "a" 1 +``` + +[[play](https://go.dev/play/p/xVP_k0kJ96W)] + +### Zip2 -> Zip9 + +Zip creates a slice of grouped elements, the first of which contains the first elements of the given arrays, the second of which contains the second elements of the given arrays, and so on. + +When collections have different size, the Tuple attributes are filled with zero value. + +```go +tuples := lo.Zip2([]string{"a", "b"}, []int{1, 2}) +// []Tuple2[string, int]{{A: "a", B: 1}, {A: "b", B: 2}} +``` + +[[play](https://go.dev/play/p/jujaA6GaJTp)] + +### Unzip2 -> Unzip9 + +Unzip accepts an array of grouped elements and creates an array regrouping the elements to their pre-zip configuration. + +```go +a, b := lo.Unzip2([]Tuple2[string, int]{{A: "a", B: 1}, {A: "b", B: 2}}) +// []string{"a", "b"} +// []int{1, 2} +``` + +[[play](https://go.dev/play/p/ciHugugvaAW)] + +### ChannelDispatcher + +Distributes messages from input channels into N child channels. Close events are propagated to children. + +Underlying channels can have a fixed buffer capacity or be unbuffered when cap is 0. 
+ +```go +ch := make(chan int, 42) +for i := 0; i <= 10; i++ { + ch <- i +} + +children := lo.ChannelDispatcher(ch, 5, 10, DispatchingStrategyRoundRobin[int]) +// []<-chan int{...} + +consumer := func(c <-chan int) { + for { + msg, ok := <-c + if !ok { + println("closed") + + break + } + + println(msg) + } +} + +for i := range children { + go consumer(children[i]) +} +``` + +Many distributions strategies are available: + +- [lo.DispatchingStrategyRoundRobin](./channel.go): Distributes messages in a rotating sequential manner. +- [lo.DispatchingStrategyRandom](./channel.go): Distributes messages in a random manner. +- [lo.DispatchingStrategyWeightedRandom](./channel.go): Distributes messages in a weighted manner. +- [lo.DispatchingStrategyFirst](./channel.go): Distributes messages in the first non-full channel. +- [lo.DispatchingStrategyLeast](./channel.go): Distributes messages in the emptiest channel. +- [lo.DispatchingStrategyMost](./channel.go): Distributes to the fullest channel. + +Some strategies bring fallback, in order to favor non-blocking behaviors. See implementations. + +For custom strategies, just implement the `lo.DispatchingStrategy` prototype: + +```go +type DispatchingStrategy[T any] func(message T, messageIndex uint64, channels []<-chan T) int +``` + +Eg: + +```go +type Message struct { + TenantID uuid.UUID +} + +func hash(id uuid.UUID) int { + h := fnv.New32a() + h.Write([]byte(id.String())) + return int(h.Sum32()) +} + +// Routes messages per TenantID. +customStrategy := func(message string, messageIndex uint64, channels []<-chan string) int { + destination := hash(message) % len(channels) + + // check if channel is full + if len(channels[destination]) < cap(channels[destination]) { + return destination + } + + // fallback when child channel is full + return utils.DispatchingStrategyRoundRobin(message, uint64(destination), channels) +} + +children := lo.ChannelDispatcher(ch, 5, 10, customStrategy) +... 
+``` + +### SliceToChannel + +Returns a read-only channels of collection elements. Channel is closed after last element. Channel capacity can be customized. + +```go +list := []int{1, 2, 3, 4, 5} + +for v := range lo.SliceToChannel(2, list) { + println(v) +} +// prints 1, then 2, then 3, then 4, then 5 +``` + +### ChannelToSlice + +Returns a slice built from channels items. Blocks until channel closes. + +```go +list := []int{1, 2, 3, 4, 5} +ch := lo.SliceToChannel(2, list) + +items := ChannelToSlice(ch) +// []int{1, 2, 3, 4, 5} +``` + +### Generator + +Implements the generator design pattern. Channel is closed after last element. Channel capacity can be customized. + +```go +generator := func(yield func(int)) { + yield(1) + yield(2) + yield(3) +} + +for v := range lo.Generator(2, generator) { + println(v) +} +// prints 1, then 2, then 3 +``` + +### Buffer + +Creates a slice of n elements from a channel. Returns the slice, the slice length, the read time and the channel status (opened/closed). + +```go +ch := lo.SliceToChannel(2, []int{1, 2, 3, 4, 5}) + +items1, length1, duration1, ok1 := lo.Buffer(ch, 3) +// []int{1, 2, 3}, 3, 0s, true +items2, length2, duration2, ok2 := lo.Buffer(ch, 3) +// []int{4, 5}, 2, 0s, false +``` + +Example: RabbitMQ consumer 👇 + +```go +ch := readFromQueue() + +for { + // read 1k items + items, length, _, ok := lo.Buffer(ch, 1000) + + // do batching stuff + + if !ok { + break + } +} +``` + +### BufferWithTimeout + +Creates a slice of n elements from a channel, with timeout. Returns the slice, the slice length, the read time and the channel status (opened/closed). 
+ +```go +generator := func(yield func(int)) { + for i := 0; i < 5; i++ { + yield(i) + time.Sleep(35*time.Millisecond) + } +} + +ch := lo.Generator(0, generator) + +items1, length1, duration1, ok1 := lo.BufferWithTimeout(ch, 3, 100*time.Millisecond) +// []int{1, 2}, 2, 100ms, true +items2, length2, duration2, ok2 := lo.BufferWithTimeout(ch, 3, 100*time.Millisecond) +// []int{3, 4, 5}, 3, 75ms, true +items3, length3, duration2, ok3 := lo.BufferWithTimeout(ch, 3, 100*time.Millisecond) +// []int{}, 0, 10ms, false +``` + +Example: RabbitMQ consumer 👇 + +```go +ch := readFromQueue() + +for { + // read 1k items + // wait up to 1 second + items, length, _, ok := lo.BufferWithTimeout(ch, 1000, 1*time.Second) + + // do batching stuff + + if !ok { + break + } +} +``` + +Example: Multithreaded RabbitMQ consumer 👇 + +```go +ch := readFromQueue() + +// 5 workers +// prefetch 1k messages per worker +children := lo.ChannelDispatcher(ch, 5, 1000, lo.DispatchingStrategyFirst[int]) + +consumer := func(c <-chan int) { + for { + // read 1k items + // wait up to 1 second + items, length, _, ok := lo.BufferWithTimeout(ch, 1000, 1*time.Second) + + // do batching stuff + + if !ok { + break + } + } +} + +for i := range children { + go consumer(children[i]) +} +``` + +### FanIn + +Merge messages from multiple input channels into a single buffered channel. Output messages has no priority. When all upstream channels reach EOF, downstream channel closes. + +```go +stream1 := make(chan int, 42) +stream2 := make(chan int, 42) +stream3 := make(chan int, 42) + +all := lo.FanIn(100, stream1, stream2, stream3) +// <-chan int +``` + +### FanOut + +Broadcasts all the upstream messages to multiple downstream channels. When upstream channel reach EOF, downstream channels close. If any downstream channels is full, broadcasting is paused. 
+ +```go +stream := make(chan int, 42) + +all := lo.FanOut(5, 100, stream) +// [5]<-chan int +``` + +### Contains + +Returns true if an element is present in a collection. + +```go +present := lo.Contains([]int{0, 1, 2, 3, 4, 5}, 5) +// true +``` + +### ContainsBy + +Returns true if the predicate function returns `true`. + +```go +present := lo.ContainsBy([]int{0, 1, 2, 3, 4, 5}, func(x int) bool { + return x == 3 +}) +// true +``` + +### Every + +Returns true if all elements of a subset are contained into a collection or if the subset is empty. + +```go +ok := lo.Every([]int{0, 1, 2, 3, 4, 5}, []int{0, 2}) +// true + +ok := lo.Every([]int{0, 1, 2, 3, 4, 5}, []int{0, 6}) +// false +``` + +### EveryBy + +Returns true if the predicate returns true for all of the elements in the collection or if the collection is empty. + +```go +b := EveryBy([]int{1, 2, 3, 4}, func(x int) bool { + return x < 5 +}) +// true +``` + +### Some + +Returns true if at least 1 element of a subset is contained into a collection. +If the subset is empty Some returns false. + +```go +ok := lo.Some([]int{0, 1, 2, 3, 4, 5}, []int{0, 2}) +// true + +ok := lo.Some([]int{0, 1, 2, 3, 4, 5}, []int{-1, 6}) +// false +``` + +### SomeBy + +Returns true if the predicate returns true for any of the elements in the collection. +If the collection is empty SomeBy returns false. + +```go +b := SomeBy([]int{1, 2, 3, 4}, func(x int) bool { + return x < 3 +}) +// true +``` + +### None + +Returns true if no element of a subset are contained into a collection or if the subset is empty. + +```go +b := None([]int{0, 1, 2, 3, 4, 5}, []int{0, 2}) +// false +b := None([]int{0, 1, 2, 3, 4, 5}, []int{-1, 6}) +// true +``` + +### NoneBy + +Returns true if the predicate returns true for none of the elements in the collection or if the collection is empty. + +```go +b := NoneBy([]int{1, 2, 3, 4}, func(x int) bool { + return x < 0 +}) +// true +``` + +### Intersect + +Returns the intersection between two collections. 
+ +```go +result1 := lo.Intersect([]int{0, 1, 2, 3, 4, 5}, []int{0, 2}) +// []int{0, 2} + +result2 := lo.Intersect([]int{0, 1, 2, 3, 4, 5}, []int{0, 6}) +// []int{0} + +result3 := lo.Intersect([]int{0, 1, 2, 3, 4, 5}, []int{-1, 6}) +// []int{} +``` + +### Difference + +Returns the difference between two collections. + +- The first value is the collection of element absent of list2. +- The second value is the collection of element absent of list1. + +```go +left, right := lo.Difference([]int{0, 1, 2, 3, 4, 5}, []int{0, 2, 6}) +// []int{1, 3, 4, 5}, []int{6} + +left, right := lo.Difference([]int{0, 1, 2, 3, 4, 5}, []int{0, 1, 2, 3, 4, 5}) +// []int{}, []int{} +``` + +### Union + +Returns all distinct elements from given collections. Result will not change the order of elements relatively. + +```go +union := lo.Union([]int{0, 1, 2, 3, 4, 5}, []int{0, 2}, []int{0, 10}) +// []int{0, 1, 2, 3, 4, 5, 10} +``` + +### Without + +Returns slice excluding all given values. + +```go +subset := lo.Without([]int{0, 2, 10}, 2) +// []int{0, 10} + +subset := lo.Without([]int{0, 2, 10}, 0, 1, 2, 3, 4, 5) +// []int{10} +``` + +### WithoutEmpty + +Returns slice excluding empty values. + +```go +subset := lo.WithoutEmpty([]int{0, 2, 10}) +// []int{2, 10} +``` + +### IndexOf + +Returns the index at which the first occurrence of a value is found in an array or return -1 if the value cannot be found. + +```go +found := lo.IndexOf([]int{0, 1, 2, 1, 2, 3}, 2) +// 2 + +notFound := lo.IndexOf([]int{0, 1, 2, 1, 2, 3}, 6) +// -1 +``` + +### LastIndexOf + +Returns the index at which the last occurrence of a value is found in an array or return -1 if the value cannot be found. + +```go +found := lo.LastIndexOf([]int{0, 1, 2, 1, 2, 3}, 2) +// 4 + +notFound := lo.LastIndexOf([]int{0, 1, 2, 1, 2, 3}, 6) +// -1 +``` + +### Find + +Search an element in a slice based on a predicate. It returns element and true if element was found. 
+ +```go +str, ok := lo.Find([]string{"a", "b", "c", "d"}, func(i string) bool { + return i == "b" +}) +// "b", true + +str, ok := lo.Find([]string{"foobar"}, func(i string) bool { + return i == "b" +}) +// "", false +``` + +### FindIndexOf + +FindIndexOf searches an element in a slice based on a predicate and returns the index and true. It returns -1 and false if the element is not found. + +```go +str, index, ok := lo.FindIndexOf([]string{"a", "b", "a", "b"}, func(i string) bool { + return i == "b" +}) +// "b", 1, true + +str, index, ok := lo.FindIndexOf([]string{"foobar"}, func(i string) bool { + return i == "b" +}) +// "", -1, false +``` + +### FindLastIndexOf + +FindLastIndexOf searches an element in a slice based on a predicate and returns the index and true. It returns -1 and false if the element is not found. + +```go +str, index, ok := lo.FindLastIndexOf([]string{"a", "b", "a", "b"}, func(i string) bool { + return i == "b" +}) +// "b", 4, true + +str, index, ok := lo.FindLastIndexOf([]string{"foobar"}, func(i string) bool { + return i == "b" +}) +// "", -1, false +``` + +### FindOrElse + +Search an element in a slice based on a predicate. It returns element and true if element was found. + +```go +str := lo.FindOrElse([]string{"a", "b", "c", "d"}, "x", func(i string) bool { + return i == "b" +}) +// "b" + +str := lo.FindOrElse([]string{"foobar"}, "x", func(i string) bool { + return i == "b" +}) +// "x" +``` + +### FindKey + +Returns the key of the first value matching. + +```go +result1, ok1 := lo.FindKey(map[string]int{"foo": 1, "bar": 2, "baz": 3}, 2) +// "bar", true + +result2, ok2 := lo.FindKey(map[string]int{"foo": 1, "bar": 2, "baz": 3}, 42) +// "", false + +type test struct { + foobar string +} +result3, ok3 := lo.FindKey(map[string]test{"foo": test{"foo"}, "bar": test{"bar"}, "baz": test{"baz"}}, test{"foo"}) +// "foo", true +``` + +### FindKeyBy + +Returns the key of the first element predicate returns truthy for. 
+ +```go +result1, ok1 := lo.FindKeyBy(map[string]int{"foo": 1, "bar": 2, "baz": 3}, func(k string, v int) bool { + return k == "foo" +}) +// "foo", true + +result2, ok2 := lo.FindKeyBy(map[string]int{"foo": 1, "bar": 2, "baz": 3}, func(k string, v int) bool { + return false +}) +// "", false +``` + +### FindUniques + +Returns a slice with all the unique elements of the collection. The order of result values is determined by the order they occur in the array. + +```go +uniqueValues := lo.FindUniques([]int{1, 2, 2, 1, 2, 3}) +// []int{3} +``` + +### FindUniquesBy + +Returns a slice with all the unique elements of the collection. The order of result values is determined by the order they occur in the array. It accepts `iteratee` which is invoked for each element in array to generate the criterion by which uniqueness is computed. + +```go +uniqueValues := lo.FindUniquesBy([]int{3, 4, 5, 6, 7}, func(i int) int { + return i%3 +}) +// []int{5} +``` + +### FindDuplicates + +Returns a slice with the first occurrence of each duplicated elements of the collection. The order of result values is determined by the order they occur in the array. + +```go +duplicatedValues := lo.FindDuplicates([]int{1, 2, 2, 1, 2, 3}) +// []int{1, 2} +``` + +### FindDuplicatesBy + +Returns a slice with the first occurrence of each duplicated elements of the collection. The order of result values is determined by the order they occur in the array. It accepts `iteratee` which is invoked for each element in array to generate the criterion by which uniqueness is computed. + +```go +duplicatedValues := lo.FindDuplicatesBy([]int{3, 4, 5, 6, 7}, func(i int) int { + return i%3 +}) +// []int{3, 4} +``` + +### Min + +Search the minimum value of a collection. + +Returns zero value when collection is empty. + +```go +min := lo.Min([]int{1, 2, 3}) +// 1 + +min := lo.Min([]int{}) +// 0 +``` + +### MinBy + +Search the minimum value of a collection using the given comparison function. 
+ +If several values of the collection are equal to the smallest value, returns the first such value. + +Returns zero value when collection is empty. + +```go +min := lo.MinBy([]string{"s1", "string2", "s3"}, func(item string, min string) bool { + return len(item) < len(min) +}) +// "s1" + +min := lo.MinBy([]string{}, func(item string, min string) bool { + return len(item) < len(min) +}) +// "" +``` + +### Max + +Search the maximum value of a collection. + +Returns zero value when collection is empty. + +```go +max := lo.Max([]int{1, 2, 3}) +// 3 + +max := lo.Max([]int{}) +// 0 +``` + +### MaxBy + +Search the maximum value of a collection using the given comparison function. + +If several values of the collection are equal to the greatest value, returns the first such value. + +Returns zero value when collection is empty. + +```go +max := lo.MaxBy([]string{"string1", "s2", "string3"}, func(item string, max string) bool { + return len(item) > len(max) +}) +// "string1" + +max := lo.MaxBy([]string{}, func(item string, max string) bool { + return len(item) > len(max) +}) +// "" +``` + +### Last + +Returns the last element of a collection or error if empty. + +```go +last, err := lo.Last([]int{1, 2, 3}) +// 3 +``` + +### Nth + +Returns the element at index `nth` of collection. If `nth` is negative, the nth element from the end is returned. An error is returned when nth is out of slice bounds. + +```go +nth, err := lo.Nth([]int{0, 1, 2, 3}, 2) +// 2 + +nth, err := lo.Nth([]int{0, 1, 2, 3}, -2) +// 2 +``` + +### Sample + +Returns a random item from collection. + +```go +lo.Sample([]string{"a", "b", "c"}) +// a random string from []string{"a", "b", "c"} + +lo.Sample([]string{}) +// "" +``` + +### Samples + +Returns N random unique items from collection. + +```go +lo.Samples([]string{"a", "b", "c"}, 3) +// []string{"a", "b", "c"} in random order +``` + +### Ternary + +A 1 line if/else statement. 
+ +```go +result := lo.Ternary(true, "a", "b") +// "a" + +result := lo.Ternary(false, "a", "b") +// "b" +``` + +[[play](https://go.dev/play/p/t-D7WBL44h2)] + +### TernaryF + +A 1 line if/else statement whose options are functions. + +```go +result := lo.TernaryF(true, func() string { return "a" }, func() string { return "b" }) +// "a" + +result := lo.TernaryF(false, func() string { return "a" }, func() string { return "b" }) +// "b" +``` + +Useful to avoid nil-pointer dereferencing in intializations, or avoid running unnecessary code + +```go +var s *string + +someStr := TernaryF(s == nil, func() string { return uuid.New().String() }, func() string { return *s }) +// ef782193-c30c-4e2e-a7ae-f8ab5e125e02 +``` + +[[play](https://go.dev/play/p/AO4VW20JoqM)] + +### If / ElseIf / Else + +```go +result := lo.If(true, 1). + ElseIf(false, 2). + Else(3) +// 1 + +result := lo.If(false, 1). + ElseIf(true, 2). + Else(3) +// 2 + +result := lo.If(false, 1). + ElseIf(false, 2). + Else(3) +// 3 +``` + +Using callbacks: + +```go +result := lo.IfF(true, func () int { + return 1 + }). + ElseIfF(false, func () int { + return 2 + }). + ElseF(func () int { + return 3 + }) +// 1 +``` + +Mixed: + +```go +result := lo.IfF(true, func () int { + return 1 + }). + Else(42) +// 1 +``` + +[[play](https://go.dev/play/p/WSw3ApMxhyW)] + +### Switch / Case / Default + +```go +result := lo.Switch(1). + Case(1, "1"). + Case(2, "2"). + Default("3") +// "1" + +result := lo.Switch(2). + Case(1, "1"). + Case(2, "2"). + Default("3") +// "2" + +result := lo.Switch(42). + Case(1, "1"). + Case(2, "2"). + Default("3") +// "3" +``` + +Using callbacks: + +```go +result := lo.Switch(1). + CaseF(1, func() string { + return "1" + }). + CaseF(2, func() string { + return "2" + }). + DefaultF(func() string { + return "3" + }) +// "1" +``` + +Mixed: + +```go +result := lo.Switch(1). + CaseF(1, func() string { + return "1" + }). 
+ Default("42") +// "1" +``` + +[[play](https://go.dev/play/p/TGbKUMAeRUd)] + +### ToPtr + +Returns a pointer copy of value. + +```go +ptr := lo.ToPtr("hello world") +// *string{"hello world"} +``` + +### EmptyableToPtr + +Returns a pointer copy of value if it's nonzero. +Otherwise, returns nil pointer. + +```go +ptr := lo.EmptyableToPtr[[]int](nil) +// nil + +ptr := lo.EmptyableToPtr[string]("") +// nil + +ptr := lo.EmptyableToPtr[[]int]([]int{}) +// *[]int{} + +ptr := lo.EmptyableToPtr[string]("hello world") +// *string{"hello world"} +``` + +### FromPtr + +Returns the pointer value or empty. + +```go +str := "hello world" +value := lo.FromPtr(&str) +// "hello world" + +value := lo.FromPtr[string](nil) +// "" +``` + +### FromPtrOr + +Returns the pointer value or the fallback value. + +```go +str := "hello world" +value := lo.FromPtrOr(&str, "empty") +// "hello world" + +value := lo.FromPtrOr[string](nil, "empty") +// "empty" +``` + +### ToSlicePtr + +Returns a slice of pointer copy of value. + +```go +ptr := lo.ToSlicePtr([]string{"hello", "world"}) +// []*string{"hello", "world"} +``` + +### ToAnySlice + +Returns a slice with all elements mapped to `any` type. + +```go +elements := lo.ToAnySlice([]int{1, 5, 1}) +// []any{1, 5, 1} +``` + +### FromAnySlice + +Returns an `any` slice with all elements mapped to a type. Returns false in case of type conversion failure. + +```go +elements, ok := lo.FromAnySlice([]any{"foobar", 42}) +// []string{}, false + +elements, ok := lo.FromAnySlice([]any{"foobar", "42"}) +// []string{"foobar", "42"}, true +``` + +### Empty + +Returns an empty value. + +```go +lo.Empty[int]() +// 0 +lo.Empty[string]() +// "" +lo.Empty[bool]() +// false +``` + +### IsEmpty + +Returns true if argument is a zero value. 
+ +```go +lo.IsEmpty(0) +// true +lo.IsEmpty(42) +// false + +lo.IsEmpty("") +// true +lo.IsEmpty("foobar") +// false + +type test struct { + foobar string +} + +lo.IsEmpty(test{foobar: ""}) +// true +lo.IsEmpty(test{foobar: "foobar"}) +// false +``` + +### IsNotEmpty + +Returns true if argument is a zero value. + +```go +lo.IsNotEmpty(0) +// false +lo.IsNotEmpty(42) +// true + +lo.IsNotEmpty("") +// false +lo.IsNotEmpty("foobar") +// true + +type test struct { + foobar string +} + +lo.IsNotEmpty(test{foobar: ""}) +// false +lo.IsNotEmpty(test{foobar: "foobar"}) +// true +``` + +### Coalesce + +Returns the first non-empty arguments. Arguments must be comparable. + +```go +result, ok := lo.Coalesce(0, 1, 2, 3) +// 1 true + +result, ok := lo.Coalesce("") +// "" false + +var nilStr *string +str := "foobar" +result, ok := lo.Coalesce[*string](nil, nilStr, &str) +// &"foobar" true +``` + +### Partial + +Returns new function that, when called, has its first argument set to the provided value. + +```go +add := func(x, y int) int { return x + y } +f := lo.Partial(add, 5) + +f(10) +// 15 + +f(42) +// 47 +``` + +### Partial2 -> Partial5 + +Returns new function that, when called, has its first argument set to the provided value. + +```go +add := func(x, y, z int) int { return x + y + z } +f := lo.Partial2(add, 42) + +f(10, 5) +// 57 + +f(42, -4) +// 80 +``` + +### Attempt + +Invokes a function N times until it returns valid output. Returning either the caught error or nil. When first argument is less than `1`, the function runs until a successful response is returned. 
+ +```go +iter, err := lo.Attempt(42, func(i int) error { + if i == 5 { + return nil + } + + return fmt.Errorf("failed") +}) +// 6 +// nil + +iter, err := lo.Attempt(2, func(i int) error { + if i == 5 { + return nil + } + + return fmt.Errorf("failed") +}) +// 2 +// error "failed" + +iter, err := lo.Attempt(0, func(i int) error { + if i < 42 { + return fmt.Errorf("failed") + } + + return nil +}) +// 43 +// nil +``` + +For more advanced retry strategies (delay, exponential backoff...), please take a look on [cenkalti/backoff](https://github.com/cenkalti/backoff). + +[[play](https://go.dev/play/p/3ggJZ2ZKcMj)] + +### AttemptWithDelay + +Invokes a function N times until it returns valid output, with a pause between each call. Returning either the caught error or nil. + +When first argument is less than `1`, the function runs until a successful response is returned. + +```go +iter, duration, err := lo.AttemptWithDelay(5, 2*time.Second, func(i int, duration time.Duration) error { + if i == 2 { + return nil + } + + return fmt.Errorf("failed") +}) +// 3 +// ~ 4 seconds +// nil +``` + +For more advanced retry strategies (delay, exponential backoff...), please take a look on [cenkalti/backoff](https://github.com/cenkalti/backoff). + +[[play](https://go.dev/play/p/tVs6CygC7m1)] + +### AttemptWhile + +Invokes a function N times until it returns valid output. Returning either the caught error or nil, and along with a bool value to identifying whether it needs invoke function continuously. It will terminate the invoke immediately if second bool value is returned with falsy value. + +When first argument is less than `1`, the function runs until a successful response is returned. 
+ +```go +count1, err1 := lo.AttemptWhile(5, func(i int) (error, bool) { + err := doMockedHTTPRequest(i) + if err != nil { + if errors.Is(err, ErrBadRequest) { // lets assume ErrBadRequest is a critical error that needs to terminate the invoke + return err, false // flag the second return value as false to terminate the invoke + } + + return err, true + } + + return nil, false +}) +``` + +For more advanced retry strategies (delay, exponential backoff...), please take a look on [cenkalti/backoff](https://github.com/cenkalti/backoff). + +[[play](https://go.dev/play/p/M2wVq24PaZM)] + +### AttemptWhileWithDelay + +Invokes a function N times until it returns valid output, with a pause between each call. Returning either the caught error or nil, and along with a bool value to identifying whether it needs to invoke function continuously. It will terminate the invoke immediately if second bool value is returned with falsy value. + +When first argument is less than `1`, the function runs until a successful response is returned. + +```go +count1, time1, err1 := lo.AttemptWhileWithDelay(5, time.Millisecond, func(i int, d time.Duration) (error, bool) { + err := doMockedHTTPRequest(i) + if err != nil { + if errors.Is(err, ErrBadRequest) { // lets assume ErrBadRequest is a critical error that needs to terminate the invoke + return err, false // flag the second return value as false to terminate the invoke + } + + return err, true + } + + return nil, false +}) +``` + +For more advanced retry strategies (delay, exponential backoff...), please take a look on [cenkalti/backoff](https://github.com/cenkalti/backoff). + +[[play](https://go.dev/play/p/cfcmhvLO-nv)] + +### Debounce + +`NewDebounce` creates a debounced instance that delays invoking functions given until after wait milliseconds have elapsed, until `cancel` is called. 
+ +```go +f := func() { + println("Called once after 100ms when debounce stopped invoking!") +} + +debounce, cancel := lo.NewDebounce(100 * time.Millisecond, f) +for j := 0; j < 10; j++ { + debounce() +} + +time.Sleep(1 * time.Second) +cancel() +``` + +[[play](https://go.dev/play/p/mz32VMK2nqe)] + +### DebounceBy + +`NewDebounceBy` creates a debounced instance for each distinct key, that delays invoking functions given until after wait milliseconds have elapsed, until `cancel` is called. + +```go +f := func(key string, count int) { + println(key + ": Called once after 100ms when debounce stopped invoking!") +} + +debounce, cancel := lo.NewDebounceBy(100 * time.Millisecond, f) +for j := 0; j < 10; j++ { + debounce("first key") + debounce("second key") +} + +time.Sleep(1 * time.Second) +cancel("first key") +cancel("second key") +``` + +[[play](https://go.dev/play/p/d3Vpt6pxhY8)] + +### Synchronize + +Wraps the underlying callback in a mutex. It receives an optional mutex. + +```go +s := lo.Synchronize() + +for i := 0; i < 10; i++ { + go s.Do(func () { + println("will be called sequentially") + }) +} +``` + +It is equivalent to: + +```go +mu := sync.Mutex{} + +func foobar() { + mu.Lock() + defer mu.Unlock() + + // ... +} +``` + +### Async + +Executes a function in a goroutine and returns the result in a channel. + +```go +ch := lo.Async(func() error { time.Sleep(10 * time.Second); return nil }) +// chan error (nil) +``` + +### Async{0->6} + +Executes a function in a goroutine and returns the result in a channel. +For function with multiple return values, the results will be returned as a tuple inside the channel. +For function without return, struct{} will be returned in the channel. 
+ +```go +ch := lo.Async0(func() { time.Sleep(10 * time.Second) }) +// chan struct{} + +ch := lo.Async1(func() int { + time.Sleep(10 * time.Second); + return 42 +}) +// chan int (42) + +ch := lo.Async2(func() (int, string) { + time.Sleep(10 * time.Second); + return 42, "Hello" +}) +// chan lo.Tuple2[int, string] ({42, "Hello"}) +``` + +### Transaction + +Implements a Saga pattern. + +```go +transaction := NewTransaction[int](). + Then( + func(state int) (int, error) { + fmt.Println("step 1") + return state + 10, nil + }, + func(state int) int { + fmt.Println("rollback 1") + return state - 10 + }, + ). + Then( + func(state int) (int, error) { + fmt.Println("step 2") + return state + 15, nil + }, + func(state int) int { + fmt.Println("rollback 2") + return state - 15 + }, + ). + Then( + func(state int) (int, error) { + fmt.Println("step 3") + + if true { + return state, fmt.Errorf("error") + } + + return state + 42, nil + }, + func(state int) int { + fmt.Println("rollback 3") + return state - 42 + }, + ) + +_, _ = transaction.Process(-5) + +// Output: +// step 1 +// step 2 +// step 3 +// rollback 2 +// rollback 1 +``` + +### Validate + +Helper function that creates an error when a condition is not met. + +```go +slice := []string{"a"} +val := lo.Validate(len(slice) == 0, "Slice should be empty but contains %v", slice) +// error("Slice should be empty but contains [a]") + +slice := []string{} +val := lo.Validate(len(slice) == 0, "Slice should be empty but contains %v", slice) +// nil +``` + +[[play](https://go.dev/play/p/vPyh51XpCBt)] + +### Must + +Wraps a function call to panics if second argument is `error` or `false`, returns the value otherwise. + +```go +val := lo.Must(time.Parse("2006-01-02", "2022-01-15")) +// 2022-01-15 + +val := lo.Must(time.Parse("2006-01-02", "bad-value")) +// panics +``` + +[[play](https://go.dev/play/p/TMoWrRp3DyC)] + +### Must{0->6} + +Must\* has the same behavior as Must, but returns multiple values. 
+
+```go
+func example0() (error)
+func example1() (int, error)
+func example2() (int, string, error)
+func example3() (int, string, time.Date, error)
+func example4() (int, string, time.Date, bool, error)
+func example5() (int, string, time.Date, bool, float64, error)
+func example6() (int, string, time.Date, bool, float64, byte, error)
+
+lo.Must0(example0())
+val1 := lo.Must1(example1()) // alias to Must
+val1, val2 := lo.Must2(example2())
+val1, val2, val3 := lo.Must3(example3())
+val1, val2, val3, val4 := lo.Must4(example4())
+val1, val2, val3, val4, val5 := lo.Must5(example5())
+val1, val2, val3, val4, val5, val6 := lo.Must6(example6())
+```
+
+You can wrap functions like `func (...) (..., ok bool)`.
+
+```go
+// math.Signbit(float64) bool
+lo.Must0(math.Signbit(v))
+
+// bytes.Cut([]byte,[]byte) ([]byte, []byte, bool)
+before, after := lo.Must2(bytes.Cut(s, sep))
+```
+
+You can give context to the panic message by adding some printf-like arguments.
+
+```go
+val, ok := lo.Find(myString, func(i string) bool {
+    return i == requiredChar
+})
+lo.Must0(ok, "'%s' must always contain '%s'", myString, requiredChar)
+
+list := []int{0, 1, 2}
+item := 5
+lo.Must0(lo.Contains[int](list, item), "'%s' must always contain '%s'", list, item)
+...
+```
+
+[[play](https://go.dev/play/p/TMoWrRp3DyC)]
+
+### Try
+
+Calls the function and returns false in case of error and on panic.
+
+```go
+ok := lo.Try(func() error {
+    panic("error")
+    return nil
+})
+// false
+
+ok := lo.Try(func() error {
+    return nil
+})
+// true
+
+ok := lo.Try(func() error {
+    return fmt.Errorf("error")
+})
+// false
+```
+
+[[play](https://go.dev/play/p/mTyyWUvn9u4)]
+
+### Try{0->6}
+
+The same behavior as `Try`, but the callback returns 2 variables.
+
+```go
+ok := lo.Try2(func() (string, error) {
+    panic("error")
+    return "", nil
+})
+// false
+```
+
+[[play](https://go.dev/play/p/mTyyWUvn9u4)]
+
+### TryOr
+
+Calls the function and returns a default value in case of error and on panic.
+
+```go
+str, ok := lo.TryOr(func() (string, error) {
+    panic("error")
+    return "hello", nil
+}, "world")
+// world
+// false
+
+str, ok := lo.TryOr(func() (string, error) {
+    return "hello", nil
+}, "world")
+// hello
+// true
+
+str, ok := lo.TryOr(func() (string, error) {
+    return "hello", fmt.Errorf("error")
+}, "world")
+// world
+// false
+```
+
+[[play](https://go.dev/play/p/B4F7Wg2Zh9X)]
+
+### TryOr{0->6}
+
+The same behavior as `TryOr`, but the callback returns `X` variables.
+
+```go
+str, nbr, ok := lo.TryOr2(func() (string, int, error) {
+    panic("error")
+    return "hello", 42, nil
+}, "world", 21)
+// world
+// 21
+// false
+```
+
+[[play](https://go.dev/play/p/B4F7Wg2Zh9X)]
+
+### TryWithErrorValue
+
+The same behavior as `Try`, but also returns the value passed to panic.
+
+```go
+err, ok := lo.TryWithErrorValue(func() error {
+    panic("error")
+    return nil
+})
+// "error", false
+```
+
+[[play](https://go.dev/play/p/Kc7afQIT2Fs)]
+
+### TryCatch
+
+The same behavior as `Try`, but calls the catch function in case of error.
+
+```go
+caught := false
+
+ok := lo.TryCatch(func() error {
+    panic("error")
+    return nil
+}, func() {
+    caught = true
+})
+// false
+// caught == true
+```
+
+[[play](https://go.dev/play/p/PnOON-EqBiU)]
+
+### TryCatchWithErrorValue
+
+The same behavior as `TryWithErrorValue`, but calls the catch function in case of error.
+ +```go +caught := false + +ok := lo.TryCatchWithErrorValue(func() error { + panic("error") + return nil +}, func(val any) { + caught = val == "error" +}) +// false +// caught == true +``` + +[[play](https://go.dev/play/p/8Pc9gwX_GZO)] + +### ErrorsAs + +A shortcut for: + +```go +err := doSomething() + +var rateLimitErr *RateLimitError +if ok := errors.As(err, &rateLimitErr); ok { + // retry later +} +``` + +1 line `lo` helper: + +```go +err := doSomething() + +if rateLimitErr, ok := lo.ErrorsAs[*RateLimitError](err); ok { + // retry later +} +``` + +[[play](https://go.dev/play/p/8wk5rH8UfrE)] + +## 🛩 Benchmark + +We executed a simple benchmark with the a dead-simple `lo.Map` loop: + +See the full implementation [here](./benchmark_test.go). + +```go +_ = lo.Map[int64](arr, func(x int64, i int) string { + return strconv.FormatInt(x, 10) +}) +``` + +**Result:** + +Here is a comparison between `lo.Map`, `lop.Map`, `go-funk` library and a simple Go `for` loop. + +```shell +$ go test -benchmem -bench ./... +goos: linux +goarch: amd64 +pkg: github.com/samber/lo +cpu: Intel(R) Core(TM) i5-7267U CPU @ 3.10GHz +cpu: Intel(R) Core(TM) i7 CPU 920 @ 2.67GHz +BenchmarkMap/lo.Map-8 8 132728237 ns/op 39998945 B/op 1000002 allocs/op +BenchmarkMap/lop.Map-8 2 503947830 ns/op 119999956 B/op 3000007 allocs/op +BenchmarkMap/reflect-8 2 826400560 ns/op 170326512 B/op 4000042 allocs/op +BenchmarkMap/for-8 9 126252954 ns/op 39998674 B/op 1000001 allocs/op +PASS +ok github.com/samber/lo 6.657s +``` + +- `lo.Map` is way faster (x7) than `go-funk`, a reflection-based Map implementation. +- `lo.Map` have the same allocation profile than `for`. +- `lo.Map` is 4% slower than `for`. +- `lop.Map` is slower than `lo.Map` because it implies more memory allocation and locks. `lop.Map` will be useful for long-running callbacks, such as i/o bound processing. +- `for` beats other implementations for memory and CPU. 
+ +## 🤝 Contributing + +- Ping me on twitter [@samuelberthe](https://twitter.com/samuelberthe) (DMs, mentions, whatever :)) +- Fork the [project](https://github.com/samber/lo) +- Fix [open issues](https://github.com/samber/lo/issues) or request new features + +Don't hesitate ;) + +Helper naming: helpers must be self explanatory and respect standards (other languages, libraries...). Feel free to suggest many names in your contributions. + +### With Docker + +```bash +docker-compose run --rm dev +``` + +### Without Docker + +```bash +# Install some dev dependencies +make tools + +# Run tests +make test +# or +make watch-test +``` + +## 👤 Contributors + +![Contributors](https://contrib.rocks/image?repo=samber/lo) + +## 💫 Show your support + +Give a ⭐️ if this project helped you! + +[![support us](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/samber) + +## 📝 License + +Copyright © 2022 [Samuel Berthe](https://github.com/samber). + +This project is [MIT](./LICENSE) licensed. diff --git a/vendor/github.com/samber/lo/channel.go b/vendor/github.com/samber/lo/channel.go new file mode 100644 index 0000000000..5dcac328a8 --- /dev/null +++ b/vendor/github.com/samber/lo/channel.go @@ -0,0 +1,309 @@ +package lo + +import ( + "math/rand" + "sync" + "time" +) + +type DispatchingStrategy[T any] func(msg T, index uint64, channels []<-chan T) int + +// ChannelDispatcher distributes messages from input channels into N child channels. +// Close events are propagated to children. +// Underlying channels can have a fixed buffer capacity or be unbuffered when cap is 0. 
+func ChannelDispatcher[T any](stream <-chan T, count int, channelBufferCap int, strategy DispatchingStrategy[T]) []<-chan T { + children := createChannels[T](count, channelBufferCap) + + roChildren := channelsToReadOnly(children) + + go func() { + // propagate channel closing to children + defer closeChannels(children) + + var i uint64 = 0 + + for { + msg, ok := <-stream + if !ok { + return + } + + destination := strategy(msg, i, roChildren) % count + children[destination] <- msg + + i++ + } + }() + + return roChildren +} + +func createChannels[T any](count int, channelBufferCap int) []chan T { + children := make([]chan T, 0, count) + + for i := 0; i < count; i++ { + children = append(children, make(chan T, channelBufferCap)) + } + + return children +} + +func channelsToReadOnly[T any](children []chan T) []<-chan T { + roChildren := make([]<-chan T, 0, len(children)) + + for i := range children { + roChildren = append(roChildren, children[i]) + } + + return roChildren +} + +func closeChannels[T any](children []chan T) { + for i := 0; i < len(children); i++ { + close(children[i]) + } +} + +func channelIsNotFull[T any](ch <-chan T) bool { + return cap(ch) == 0 || len(ch) < cap(ch) +} + +// DispatchingStrategyRoundRobin distributes messages in a rotating sequential manner. +// If the channel capacity is exceeded, the next channel will be selected and so on. +func DispatchingStrategyRoundRobin[T any](msg T, index uint64, channels []<-chan T) int { + for { + i := int(index % uint64(len(channels))) + if channelIsNotFull(channels[i]) { + return i + } + + index++ + time.Sleep(10 * time.Microsecond) // prevent CPU from burning 🔥 + } +} + +// DispatchingStrategyRandom distributes messages in a random manner. +// If the channel capacity is exceeded, another random channel will be selected and so on. 
+func DispatchingStrategyRandom[T any](msg T, index uint64, channels []<-chan T) int { + for { + i := rand.Intn(len(channels)) + if channelIsNotFull(channels[i]) { + return i + } + + time.Sleep(10 * time.Microsecond) // prevent CPU from burning 🔥 + } +} + +// DispatchingStrategyWeightedRandom distributes messages in a weighted manner. +// If the channel capacity is exceeded, another random channel will be selected and so on. +func DispatchingStrategyWeightedRandom[T any](weights []int) DispatchingStrategy[T] { + seq := []int{} + + for i := 0; i < len(weights); i++ { + for j := 0; j < weights[i]; j++ { + seq = append(seq, i) + } + } + + return func(msg T, index uint64, channels []<-chan T) int { + for { + i := seq[rand.Intn(len(seq))] + if channelIsNotFull(channels[i]) { + return i + } + + time.Sleep(10 * time.Microsecond) // prevent CPU from burning 🔥 + } + } +} + +// DispatchingStrategyFirst distributes messages in the first non-full channel. +// If the capacity of the first channel is exceeded, the second channel will be selected and so on. +func DispatchingStrategyFirst[T any](msg T, index uint64, channels []<-chan T) int { + for { + for i := range channels { + if channelIsNotFull(channels[i]) { + return i + } + } + + time.Sleep(10 * time.Microsecond) // prevent CPU from burning 🔥 + } +} + +// DispatchingStrategyLeast distributes messages in the emptiest channel. +func DispatchingStrategyLeast[T any](msg T, index uint64, channels []<-chan T) int { + seq := Range(len(channels)) + + return MinBy(seq, func(item int, min int) bool { + return len(channels[item]) < len(channels[min]) + }) +} + +// DispatchingStrategyMost distributes messages in the fullest channel. +// If the channel capacity is exceeded, the next channel will be selected and so on. 
+func DispatchingStrategyMost[T any](msg T, index uint64, channels []<-chan T) int { + seq := Range(len(channels)) + + return MaxBy(seq, func(item int, max int) bool { + return len(channels[item]) > len(channels[max]) && channelIsNotFull(channels[item]) + }) +} + +// SliceToChannel returns a read-only channels of collection elements. +func SliceToChannel[T any](bufferSize int, collection []T) <-chan T { + ch := make(chan T, bufferSize) + + go func() { + for _, item := range collection { + ch <- item + } + + close(ch) + }() + + return ch +} + +// ChannelToSlice returns a slice built from channels items. Blocks until channel closes. +func ChannelToSlice[T any](ch <-chan T) []T { + collection := []T{} + + for item := range ch { + collection = append(collection, item) + } + + return collection +} + +// Generator implements the generator design pattern. +func Generator[T any](bufferSize int, generator func(yield func(T))) <-chan T { + ch := make(chan T, bufferSize) + + go func() { + // WARNING: infinite loop + generator(func(t T) { + ch <- t + }) + + close(ch) + }() + + return ch +} + +// Buffer creates a slice of n elements from a channel. Returns the slice and the slice length. +// @TODO: we should probably provide an helper that reuse the same buffer. +func Buffer[T any](ch <-chan T, size int) (collection []T, length int, readTime time.Duration, ok bool) { + buffer := make([]T, 0, size) + index := 0 + now := time.Now() + + for ; index < size; index++ { + item, ok := <-ch + if !ok { + return buffer, index, time.Since(now), false + } + + buffer = append(buffer, item) + } + + return buffer, index, time.Since(now), true +} + +// Batch creates a slice of n elements from a channel. Returns the slice and the slice length. +// +// Deprecated: Use [Buffer] instead. 
+func Batch[T any](ch <-chan T, size int) (collection []T, length int, readTime time.Duration, ok bool) { + return Buffer(ch, size) +} + +// BufferWithTimeout creates a slice of n elements from a channel, with timeout. Returns the slice and the slice length. +// @TODO: we should probably provide an helper that reuse the same buffer. +func BufferWithTimeout[T any](ch <-chan T, size int, timeout time.Duration) (collection []T, length int, readTime time.Duration, ok bool) { + expire := time.NewTimer(timeout) + defer expire.Stop() + + buffer := make([]T, 0, size) + index := 0 + now := time.Now() + + for ; index < size; index++ { + select { + case item, ok := <-ch: + if !ok { + return buffer, index, time.Since(now), false + } + + buffer = append(buffer, item) + + case <-expire.C: + return buffer, index, time.Since(now), true + } + } + + return buffer, index, time.Since(now), true +} + +// BatchWithTimeout creates a slice of n elements from a channel, with timeout. Returns the slice and the slice length. +// +// Deprecated: Use [BufferWithTimeout] instead. +func BatchWithTimeout[T any](ch <-chan T, size int, timeout time.Duration) (collection []T, length int, readTime time.Duration, ok bool) { + return BufferWithTimeout(ch, size, timeout) +} + +// FanIn collects messages from multiple input channels into a single buffered channel. +// Output messages has no priority. When all upstream channels reach EOF, downstream channel closes. +func FanIn[T any](channelBufferCap int, upstreams ...<-chan T) <-chan T { + out := make(chan T, channelBufferCap) + var wg sync.WaitGroup + + // Start an output goroutine for each input channel in upstreams. + wg.Add(len(upstreams)) + for _, c := range upstreams { + go func(c <-chan T) { + for n := range c { + out <- n + } + wg.Done() + }(c) + } + + // Start a goroutine to close out once all the output goroutines are done. 
+ go func() { + wg.Wait() + close(out) + }() + return out +} + +// ChannelMerge collects messages from multiple input channels into a single buffered channel. +// Output messages has no priority. When all upstream channels reach EOF, downstream channel closes. +// +// Deprecated: Use [FanIn] instead. +func ChannelMerge[T any](channelBufferCap int, upstreams ...<-chan T) <-chan T { + return FanIn(channelBufferCap, upstreams...) +} + +// FanOut broadcasts all the upstream messages to multiple downstream channels. +// When upstream channel reach EOF, downstream channels close. If any downstream +// channels is full, broadcasting is paused. +func FanOut[T any](count int, channelsBufferCap int, upstream <-chan T) []<-chan T { + downstreams := createChannels[T](count, channelsBufferCap) + + go func() { + for msg := range upstream { + for i := range downstreams { + downstreams[i] <- msg + } + } + + // Close out once all the output goroutines are done. + for i := range downstreams { + close(downstreams[i]) + } + }() + + return channelsToReadOnly(downstreams) +} diff --git a/vendor/github.com/samber/lo/concurrency.go b/vendor/github.com/samber/lo/concurrency.go new file mode 100644 index 0000000000..d0aca2aa28 --- /dev/null +++ b/vendor/github.com/samber/lo/concurrency.go @@ -0,0 +1,95 @@ +package lo + +import "sync" + +type synchronize struct { + locker sync.Locker +} + +func (s *synchronize) Do(cb func()) { + s.locker.Lock() + Try0(cb) + s.locker.Unlock() +} + +// Synchronize wraps the underlying callback in a mutex. It receives an optional mutex. +func Synchronize(opt ...sync.Locker) *synchronize { + if len(opt) > 1 { + panic("unexpected arguments") + } else if len(opt) == 0 { + opt = append(opt, &sync.Mutex{}) + } + + return &synchronize{ + locker: opt[0], + } +} + +// Async executes a function in a goroutine and returns the result in a channel. 
+func Async[A any](f func() A) <-chan A { + ch := make(chan A, 1) + go func() { + ch <- f() + }() + return ch +} + +// Async0 executes a function in a goroutine and returns a channel set once the function finishes. +func Async0(f func()) <-chan struct{} { + ch := make(chan struct{}, 1) + go func() { + f() + ch <- struct{}{} + }() + return ch +} + +// Async1 is an alias to Async. +func Async1[A any](f func() A) <-chan A { + return Async(f) +} + +// Async2 has the same behavior as Async, but returns the 2 results as a tuple inside the channel. +func Async2[A any, B any](f func() (A, B)) <-chan Tuple2[A, B] { + ch := make(chan Tuple2[A, B], 1) + go func() { + ch <- T2(f()) + }() + return ch +} + +// Async3 has the same behavior as Async, but returns the 3 results as a tuple inside the channel. +func Async3[A any, B any, C any](f func() (A, B, C)) <-chan Tuple3[A, B, C] { + ch := make(chan Tuple3[A, B, C], 1) + go func() { + ch <- T3(f()) + }() + return ch +} + +// Async4 has the same behavior as Async, but returns the 4 results as a tuple inside the channel. +func Async4[A any, B any, C any, D any](f func() (A, B, C, D)) <-chan Tuple4[A, B, C, D] { + ch := make(chan Tuple4[A, B, C, D], 1) + go func() { + ch <- T4(f()) + }() + return ch +} + +// Async5 has the same behavior as Async, but returns the 5 results as a tuple inside the channel. +func Async5[A any, B any, C any, D any, E any](f func() (A, B, C, D, E)) <-chan Tuple5[A, B, C, D, E] { + ch := make(chan Tuple5[A, B, C, D, E], 1) + go func() { + ch <- T5(f()) + }() + return ch +} + +// Async6 has the same behavior as Async, but returns the 6 results as a tuple inside the channel. 
+func Async6[A any, B any, C any, D any, E any, F any](f func() (A, B, C, D, E, F)) <-chan Tuple6[A, B, C, D, E, F] { + ch := make(chan Tuple6[A, B, C, D, E, F], 1) + go func() { + ch <- T6(f()) + }() + return ch +} diff --git a/vendor/github.com/samber/lo/condition.go b/vendor/github.com/samber/lo/condition.go new file mode 100644 index 0000000000..1d4e75d25e --- /dev/null +++ b/vendor/github.com/samber/lo/condition.go @@ -0,0 +1,150 @@ +package lo + +// Ternary is a 1 line if/else statement. +// Play: https://go.dev/play/p/t-D7WBL44h2 +func Ternary[T any](condition bool, ifOutput T, elseOutput T) T { + if condition { + return ifOutput + } + + return elseOutput +} + +// TernaryF is a 1 line if/else statement whose options are functions +// Play: https://go.dev/play/p/AO4VW20JoqM +func TernaryF[T any](condition bool, ifFunc func() T, elseFunc func() T) T { + if condition { + return ifFunc() + } + + return elseFunc() +} + +type ifElse[T any] struct { + result T + done bool +} + +// If. +// Play: https://go.dev/play/p/WSw3ApMxhyW +func If[T any](condition bool, result T) *ifElse[T] { + if condition { + return &ifElse[T]{result, true} + } + + var t T + return &ifElse[T]{t, false} +} + +// IfF. +// Play: https://go.dev/play/p/WSw3ApMxhyW +func IfF[T any](condition bool, resultF func() T) *ifElse[T] { + if condition { + return &ifElse[T]{resultF(), true} + } + + var t T + return &ifElse[T]{t, false} +} + +// ElseIf. +// Play: https://go.dev/play/p/WSw3ApMxhyW +func (i *ifElse[T]) ElseIf(condition bool, result T) *ifElse[T] { + if !i.done && condition { + i.result = result + i.done = true + } + + return i +} + +// ElseIfF. +// Play: https://go.dev/play/p/WSw3ApMxhyW +func (i *ifElse[T]) ElseIfF(condition bool, resultF func() T) *ifElse[T] { + if !i.done && condition { + i.result = resultF() + i.done = true + } + + return i +} + +// Else. 
+// Play: https://go.dev/play/p/WSw3ApMxhyW +func (i *ifElse[T]) Else(result T) T { + if i.done { + return i.result + } + + return result +} + +// ElseF. +// Play: https://go.dev/play/p/WSw3ApMxhyW +func (i *ifElse[T]) ElseF(resultF func() T) T { + if i.done { + return i.result + } + + return resultF() +} + +type switchCase[T comparable, R any] struct { + predicate T + result R + done bool +} + +// Switch is a pure functional switch/case/default statement. +// Play: https://go.dev/play/p/TGbKUMAeRUd +func Switch[T comparable, R any](predicate T) *switchCase[T, R] { + var result R + + return &switchCase[T, R]{ + predicate, + result, + false, + } +} + +// Case. +// Play: https://go.dev/play/p/TGbKUMAeRUd +func (s *switchCase[T, R]) Case(val T, result R) *switchCase[T, R] { + if !s.done && s.predicate == val { + s.result = result + s.done = true + } + + return s +} + +// CaseF. +// Play: https://go.dev/play/p/TGbKUMAeRUd +func (s *switchCase[T, R]) CaseF(val T, cb func() R) *switchCase[T, R] { + if !s.done && s.predicate == val { + s.result = cb() + s.done = true + } + + return s +} + +// Default. +// Play: https://go.dev/play/p/TGbKUMAeRUd +func (s *switchCase[T, R]) Default(result R) R { + if !s.done { + s.result = result + } + + return s.result +} + +// DefaultF. +// Play: https://go.dev/play/p/TGbKUMAeRUd +func (s *switchCase[T, R]) DefaultF(cb func() R) R { + if !s.done { + s.result = cb() + } + + return s.result +} diff --git a/vendor/github.com/samber/lo/constraints.go b/vendor/github.com/samber/lo/constraints.go new file mode 100644 index 0000000000..c1f3529685 --- /dev/null +++ b/vendor/github.com/samber/lo/constraints.go @@ -0,0 +1,6 @@ +package lo + +// Clonable defines a constraint of types having Clone() T method. 
+type Clonable[T any] interface { + Clone() T +} diff --git a/vendor/github.com/samber/lo/errors.go b/vendor/github.com/samber/lo/errors.go new file mode 100644 index 0000000000..a99013d950 --- /dev/null +++ b/vendor/github.com/samber/lo/errors.go @@ -0,0 +1,354 @@ +package lo + +import ( + "errors" + "fmt" + "reflect" +) + +// Validate is a helper that creates an error when a condition is not met. +// Play: https://go.dev/play/p/vPyh51XpCBt +func Validate(ok bool, format string, args ...any) error { + if !ok { + return fmt.Errorf(fmt.Sprintf(format, args...)) + } + return nil +} + +func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { + if len(msgAndArgs) == 1 { + if msgAsStr, ok := msgAndArgs[0].(string); ok { + return msgAsStr + } + return fmt.Sprintf("%+v", msgAndArgs[0]) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) + } + return "" +} + +// must panics if err is error or false. +func must(err any, messageArgs ...interface{}) { + if err == nil { + return + } + + switch e := err.(type) { + case bool: + if !e { + message := messageFromMsgAndArgs(messageArgs...) + if message == "" { + message = "not ok" + } + + panic(message) + } + + case error: + message := messageFromMsgAndArgs(messageArgs...) + if message != "" { + panic(message + ": " + e.Error()) + } else { + panic(e.Error()) + } + + default: + panic("must: invalid err type '" + reflect.TypeOf(err).Name() + "', should either be a bool or an error") + } +} + +// Must is a helper that wraps a call to a function returning a value and an error +// and panics if err is error or false. +// Play: https://go.dev/play/p/TMoWrRp3DyC +func Must[T any](val T, err any, messageArgs ...interface{}) T { + must(err, messageArgs...) + return val +} + +// Must0 has the same behavior as Must, but callback returns no variable. 
+// Play: https://go.dev/play/p/TMoWrRp3DyC +func Must0(err any, messageArgs ...interface{}) { + must(err, messageArgs...) +} + +// Must1 is an alias to Must +// Play: https://go.dev/play/p/TMoWrRp3DyC +func Must1[T any](val T, err any, messageArgs ...interface{}) T { + return Must(val, err, messageArgs...) +} + +// Must2 has the same behavior as Must, but callback returns 2 variables. +// Play: https://go.dev/play/p/TMoWrRp3DyC +func Must2[T1 any, T2 any](val1 T1, val2 T2, err any, messageArgs ...interface{}) (T1, T2) { + must(err, messageArgs...) + return val1, val2 +} + +// Must3 has the same behavior as Must, but callback returns 3 variables. +// Play: https://go.dev/play/p/TMoWrRp3DyC +func Must3[T1 any, T2 any, T3 any](val1 T1, val2 T2, val3 T3, err any, messageArgs ...interface{}) (T1, T2, T3) { + must(err, messageArgs...) + return val1, val2, val3 +} + +// Must4 has the same behavior as Must, but callback returns 4 variables. +// Play: https://go.dev/play/p/TMoWrRp3DyC +func Must4[T1 any, T2 any, T3 any, T4 any](val1 T1, val2 T2, val3 T3, val4 T4, err any, messageArgs ...interface{}) (T1, T2, T3, T4) { + must(err, messageArgs...) + return val1, val2, val3, val4 +} + +// Must5 has the same behavior as Must, but callback returns 5 variables. +// Play: https://go.dev/play/p/TMoWrRp3DyC +func Must5[T1 any, T2 any, T3 any, T4 any, T5 any](val1 T1, val2 T2, val3 T3, val4 T4, val5 T5, err any, messageArgs ...interface{}) (T1, T2, T3, T4, T5) { + must(err, messageArgs...) + return val1, val2, val3, val4, val5 +} + +// Must6 has the same behavior as Must, but callback returns 6 variables. +// Play: https://go.dev/play/p/TMoWrRp3DyC +func Must6[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any](val1 T1, val2 T2, val3 T3, val4 T4, val5 T5, val6 T6, err any, messageArgs ...interface{}) (T1, T2, T3, T4, T5, T6) { + must(err, messageArgs...) + return val1, val2, val3, val4, val5, val6 +} + +// Try calls the function and return false in case of error. 
+func Try(callback func() error) (ok bool) { + ok = true + + defer func() { + if r := recover(); r != nil { + ok = false + } + }() + + err := callback() + if err != nil { + ok = false + } + + return +} + +// Try0 has the same behavior as Try, but callback returns no variable. +// Play: https://go.dev/play/p/mTyyWUvn9u4 +func Try0(callback func()) bool { + return Try(func() error { + callback() + return nil + }) +} + +// Try1 is an alias to Try. +// Play: https://go.dev/play/p/mTyyWUvn9u4 +func Try1(callback func() error) bool { + return Try(callback) +} + +// Try2 has the same behavior as Try, but callback returns 2 variables. +// Play: https://go.dev/play/p/mTyyWUvn9u4 +func Try2[T any](callback func() (T, error)) bool { + return Try(func() error { + _, err := callback() + return err + }) +} + +// Try3 has the same behavior as Try, but callback returns 3 variables. +// Play: https://go.dev/play/p/mTyyWUvn9u4 +func Try3[T, R any](callback func() (T, R, error)) bool { + return Try(func() error { + _, _, err := callback() + return err + }) +} + +// Try4 has the same behavior as Try, but callback returns 4 variables. +// Play: https://go.dev/play/p/mTyyWUvn9u4 +func Try4[T, R, S any](callback func() (T, R, S, error)) bool { + return Try(func() error { + _, _, _, err := callback() + return err + }) +} + +// Try5 has the same behavior as Try, but callback returns 5 variables. +// Play: https://go.dev/play/p/mTyyWUvn9u4 +func Try5[T, R, S, Q any](callback func() (T, R, S, Q, error)) bool { + return Try(func() error { + _, _, _, _, err := callback() + return err + }) +} + +// Try6 has the same behavior as Try, but callback returns 6 variables. +// Play: https://go.dev/play/p/mTyyWUvn9u4 +func Try6[T, R, S, Q, U any](callback func() (T, R, S, Q, U, error)) bool { + return Try(func() error { + _, _, _, _, _, err := callback() + return err + }) +} + +// TryOr has the same behavior as Must, but returns a default value in case of error. 
+// Play: https://go.dev/play/p/B4F7Wg2Zh9X +func TryOr[A any](callback func() (A, error), fallbackA A) (A, bool) { + return TryOr1(callback, fallbackA) +} + +// TryOr1 has the same behavior as Must, but returns a default value in case of error. +// Play: https://go.dev/play/p/B4F7Wg2Zh9X +func TryOr1[A any](callback func() (A, error), fallbackA A) (A, bool) { + ok := false + + Try0(func() { + a, err := callback() + if err == nil { + fallbackA = a + ok = true + } + }) + + return fallbackA, ok +} + +// TryOr2 has the same behavior as Must, but returns a default value in case of error. +// Play: https://go.dev/play/p/B4F7Wg2Zh9X +func TryOr2[A any, B any](callback func() (A, B, error), fallbackA A, fallbackB B) (A, B, bool) { + ok := false + + Try0(func() { + a, b, err := callback() + if err == nil { + fallbackA = a + fallbackB = b + ok = true + } + }) + + return fallbackA, fallbackB, ok +} + +// TryOr3 has the same behavior as Must, but returns a default value in case of error. +// Play: https://go.dev/play/p/B4F7Wg2Zh9X +func TryOr3[A any, B any, C any](callback func() (A, B, C, error), fallbackA A, fallbackB B, fallbackC C) (A, B, C, bool) { + ok := false + + Try0(func() { + a, b, c, err := callback() + if err == nil { + fallbackA = a + fallbackB = b + fallbackC = c + ok = true + } + }) + + return fallbackA, fallbackB, fallbackC, ok +} + +// TryOr4 has the same behavior as Must, but returns a default value in case of error. +// Play: https://go.dev/play/p/B4F7Wg2Zh9X +func TryOr4[A any, B any, C any, D any](callback func() (A, B, C, D, error), fallbackA A, fallbackB B, fallbackC C, fallbackD D) (A, B, C, D, bool) { + ok := false + + Try0(func() { + a, b, c, d, err := callback() + if err == nil { + fallbackA = a + fallbackB = b + fallbackC = c + fallbackD = d + ok = true + } + }) + + return fallbackA, fallbackB, fallbackC, fallbackD, ok +} + +// TryOr5 has the same behavior as Must, but returns a default value in case of error. 
+// Play: https://go.dev/play/p/B4F7Wg2Zh9X +func TryOr5[A any, B any, C any, D any, E any](callback func() (A, B, C, D, E, error), fallbackA A, fallbackB B, fallbackC C, fallbackD D, fallbackE E) (A, B, C, D, E, bool) { + ok := false + + Try0(func() { + a, b, c, d, e, err := callback() + if err == nil { + fallbackA = a + fallbackB = b + fallbackC = c + fallbackD = d + fallbackE = e + ok = true + } + }) + + return fallbackA, fallbackB, fallbackC, fallbackD, fallbackE, ok +} + +// TryOr6 has the same behavior as Must, but returns a default value in case of error. +// Play: https://go.dev/play/p/B4F7Wg2Zh9X +func TryOr6[A any, B any, C any, D any, E any, F any](callback func() (A, B, C, D, E, F, error), fallbackA A, fallbackB B, fallbackC C, fallbackD D, fallbackE E, fallbackF F) (A, B, C, D, E, F, bool) { + ok := false + + Try0(func() { + a, b, c, d, e, f, err := callback() + if err == nil { + fallbackA = a + fallbackB = b + fallbackC = c + fallbackD = d + fallbackE = e + fallbackF = f + ok = true + } + }) + + return fallbackA, fallbackB, fallbackC, fallbackD, fallbackE, fallbackF, ok +} + +// TryWithErrorValue has the same behavior as Try, but also returns value passed to panic. +// Play: https://go.dev/play/p/Kc7afQIT2Fs +func TryWithErrorValue(callback func() error) (errorValue any, ok bool) { + ok = true + + defer func() { + if r := recover(); r != nil { + ok = false + errorValue = r + } + }() + + err := callback() + if err != nil { + ok = false + errorValue = err + } + + return +} + +// TryCatch has the same behavior as Try, but calls the catch function in case of error. +// Play: https://go.dev/play/p/PnOON-EqBiU +func TryCatch(callback func() error, catch func()) { + if !Try(callback) { + catch() + } +} + +// TryCatchWithErrorValue has the same behavior as TryWithErrorValue, but calls the catch function in case of error. 
+// Play: https://go.dev/play/p/8Pc9gwX_GZO +func TryCatchWithErrorValue(callback func() error, catch func(any)) { + if err, ok := TryWithErrorValue(callback); !ok { + catch(err) + } +} + +// ErrorsAs is a shortcut for errors.As(err, &&T). +// Play: https://go.dev/play/p/8wk5rH8UfrE +func ErrorsAs[T error](err error) (T, bool) { + var t T + ok := errors.As(err, &t) + return t, ok +} diff --git a/vendor/github.com/samber/lo/find.go b/vendor/github.com/samber/lo/find.go new file mode 100644 index 0000000000..f8caeb8959 --- /dev/null +++ b/vendor/github.com/samber/lo/find.go @@ -0,0 +1,372 @@ +package lo + +import ( + "fmt" + "math/rand" + + "golang.org/x/exp/constraints" +) + +// import "golang.org/x/exp/constraints" + +// IndexOf returns the index at which the first occurrence of a value is found in an array or return -1 +// if the value cannot be found. +func IndexOf[T comparable](collection []T, element T) int { + for i, item := range collection { + if item == element { + return i + } + } + + return -1 +} + +// LastIndexOf returns the index at which the last occurrence of a value is found in an array or return -1 +// if the value cannot be found. +func LastIndexOf[T comparable](collection []T, element T) int { + length := len(collection) + + for i := length - 1; i >= 0; i-- { + if collection[i] == element { + return i + } + } + + return -1 +} + +// Find search an element in a slice based on a predicate. It returns element and true if element was found. +func Find[T any](collection []T, predicate func(item T) bool) (T, bool) { + for _, item := range collection { + if predicate(item) { + return item, true + } + } + + var result T + return result, false +} + +// FindIndexOf searches an element in a slice based on a predicate and returns the index and true. +// It returns -1 and false if the element is not found. 
+func FindIndexOf[T any](collection []T, predicate func(item T) bool) (T, int, bool) { + for i, item := range collection { + if predicate(item) { + return item, i, true + } + } + + var result T + return result, -1, false +} + +// FindLastIndexOf searches last element in a slice based on a predicate and returns the index and true. +// It returns -1 and false if the element is not found. +func FindLastIndexOf[T any](collection []T, predicate func(item T) bool) (T, int, bool) { + length := len(collection) + + for i := length - 1; i >= 0; i-- { + if predicate(collection[i]) { + return collection[i], i, true + } + } + + var result T + return result, -1, false +} + +// FindOrElse search an element in a slice based on a predicate. It returns the element if found or a given fallback value otherwise. +func FindOrElse[T any](collection []T, fallback T, predicate func(item T) bool) T { + for _, item := range collection { + if predicate(item) { + return item + } + } + + return fallback +} + +// FindKey returns the key of the first value matching. +func FindKey[K comparable, V comparable](object map[K]V, value V) (K, bool) { + for k, v := range object { + if v == value { + return k, true + } + } + + return Empty[K](), false +} + +// FindKeyBy returns the key of the first element predicate returns truthy for. +func FindKeyBy[K comparable, V any](object map[K]V, predicate func(key K, value V) bool) (K, bool) { + for k, v := range object { + if predicate(k, v) { + return k, true + } + } + + return Empty[K](), false +} + +// FindUniques returns a slice with all the unique elements of the collection. +// The order of result values is determined by the order they occur in the collection. 
+func FindUniques[T comparable](collection []T) []T { + isDupl := make(map[T]bool, len(collection)) + + for _, item := range collection { + duplicated, ok := isDupl[item] + if !ok { + isDupl[item] = false + } else if !duplicated { + isDupl[item] = true + } + } + + result := make([]T, 0, len(collection)-len(isDupl)) + + for _, item := range collection { + if duplicated := isDupl[item]; !duplicated { + result = append(result, item) + } + } + + return result +} + +// FindUniquesBy returns a slice with all the unique elements of the collection. +// The order of result values is determined by the order they occur in the array. It accepts `iteratee` which is +// invoked for each element in array to generate the criterion by which uniqueness is computed. +func FindUniquesBy[T any, U comparable](collection []T, iteratee func(item T) U) []T { + isDupl := make(map[U]bool, len(collection)) + + for _, item := range collection { + key := iteratee(item) + + duplicated, ok := isDupl[key] + if !ok { + isDupl[key] = false + } else if !duplicated { + isDupl[key] = true + } + } + + result := make([]T, 0, len(collection)-len(isDupl)) + + for _, item := range collection { + key := iteratee(item) + + if duplicated := isDupl[key]; !duplicated { + result = append(result, item) + } + } + + return result +} + +// FindDuplicates returns a slice with the first occurrence of each duplicated elements of the collection. +// The order of result values is determined by the order they occur in the collection. 
+func FindDuplicates[T comparable](collection []T) []T { + isDupl := make(map[T]bool, len(collection)) + + for _, item := range collection { + duplicated, ok := isDupl[item] + if !ok { + isDupl[item] = false + } else if !duplicated { + isDupl[item] = true + } + } + + result := make([]T, 0, len(collection)-len(isDupl)) + + for _, item := range collection { + if duplicated := isDupl[item]; duplicated { + result = append(result, item) + isDupl[item] = false + } + } + + return result +} + +// FindDuplicatesBy returns a slice with the first occurrence of each duplicated elements of the collection. +// The order of result values is determined by the order they occur in the array. It accepts `iteratee` which is +// invoked for each element in array to generate the criterion by which uniqueness is computed. +func FindDuplicatesBy[T any, U comparable](collection []T, iteratee func(item T) U) []T { + isDupl := make(map[U]bool, len(collection)) + + for _, item := range collection { + key := iteratee(item) + + duplicated, ok := isDupl[key] + if !ok { + isDupl[key] = false + } else if !duplicated { + isDupl[key] = true + } + } + + result := make([]T, 0, len(collection)-len(isDupl)) + + for _, item := range collection { + key := iteratee(item) + + if duplicated := isDupl[key]; duplicated { + result = append(result, item) + isDupl[key] = false + } + } + + return result +} + +// Min search the minimum value of a collection. +// Returns zero value when collection is empty. +func Min[T constraints.Ordered](collection []T) T { + var min T + + if len(collection) == 0 { + return min + } + + min = collection[0] + + for i := 1; i < len(collection); i++ { + item := collection[i] + + if item < min { + min = item + } + } + + return min +} + +// MinBy search the minimum value of a collection using the given comparison function. +// If several values of the collection are equal to the smallest value, returns the first such value. +// Returns zero value when collection is empty. 
+func MinBy[T any](collection []T, comparison func(a T, b T) bool) T { + var min T + + if len(collection) == 0 { + return min + } + + min = collection[0] + + for i := 1; i < len(collection); i++ { + item := collection[i] + + if comparison(item, min) { + min = item + } + } + + return min +} + +// Max searches the maximum value of a collection. +// Returns zero value when collection is empty. +func Max[T constraints.Ordered](collection []T) T { + var max T + + if len(collection) == 0 { + return max + } + + max = collection[0] + + for i := 1; i < len(collection); i++ { + item := collection[i] + + if item > max { + max = item + } + } + + return max +} + +// MaxBy search the maximum value of a collection using the given comparison function. +// If several values of the collection are equal to the greatest value, returns the first such value. +// Returns zero value when collection is empty. +func MaxBy[T any](collection []T, comparison func(a T, b T) bool) T { + var max T + + if len(collection) == 0 { + return max + } + + max = collection[0] + + for i := 1; i < len(collection); i++ { + item := collection[i] + + if comparison(item, max) { + max = item + } + } + + return max +} + +// Last returns the last element of a collection or error if empty. +func Last[T any](collection []T) (T, error) { + length := len(collection) + + if length == 0 { + var t T + return t, fmt.Errorf("last: cannot extract the last element of an empty slice") + } + + return collection[length-1], nil +} + +// Nth returns the element at index `nth` of collection. If `nth` is negative, the nth element +// from the end is returned. An error is returned when nth is out of slice bounds. 
+func Nth[T any, N constraints.Integer](collection []T, nth N) (T, error) { + n := int(nth) + l := len(collection) + if n >= l || -n > l { + var t T + return t, fmt.Errorf("nth: %d out of slice bounds", n) + } + + if n >= 0 { + return collection[n], nil + } + return collection[l+n], nil +} + +// Sample returns a random item from collection. +func Sample[T any](collection []T) T { + size := len(collection) + if size == 0 { + return Empty[T]() + } + + return collection[rand.Intn(size)] +} + +// Samples returns N random unique items from collection. +func Samples[T any](collection []T, count int) []T { + size := len(collection) + + copy := append([]T{}, collection...) + + results := []T{} + + for i := 0; i < size && i < count; i++ { + copyLength := size - i + + index := rand.Intn(size - i) + results = append(results, copy[index]) + + // Removes element. + // It is faster to swap with last element and remove it. + copy[index] = copy[copyLength-1] + copy = copy[:copyLength-1] + } + + return results +} diff --git a/vendor/github.com/samber/lo/func.go b/vendor/github.com/samber/lo/func.go new file mode 100644 index 0000000000..5fa1cbc871 --- /dev/null +++ b/vendor/github.com/samber/lo/func.go @@ -0,0 +1,41 @@ +package lo + +// Partial returns new function that, when called, has its first argument set to the provided value. +func Partial[T1, T2, R any](f func(a T1, b T2) R, arg1 T1) func(T2) R { + return func(t2 T2) R { + return f(arg1, t2) + } +} + +// Partial1 returns new function that, when called, has its first argument set to the provided value. +func Partial1[T1, T2, R any](f func(T1, T2) R, arg1 T1) func(T2) R { + return Partial(f, arg1) +} + +// Partial2 returns new function that, when called, has its first argument set to the provided value. 
+func Partial2[T1, T2, T3, R any](f func(T1, T2, T3) R, arg1 T1) func(T2, T3) R { + return func(t2 T2, t3 T3) R { + return f(arg1, t2, t3) + } +} + +// Partial3 returns new function that, when called, has its first argument set to the provided value. +func Partial3[T1, T2, T3, T4, R any](f func(T1, T2, T3, T4) R, arg1 T1) func(T2, T3, T4) R { + return func(t2 T2, t3 T3, t4 T4) R { + return f(arg1, t2, t3, t4) + } +} + +// Partial4 returns new function that, when called, has its first argument set to the provided value. +func Partial4[T1, T2, T3, T4, T5, R any](f func(T1, T2, T3, T4, T5) R, arg1 T1) func(T2, T3, T4, T5) R { + return func(t2 T2, t3 T3, t4 T4, t5 T5) R { + return f(arg1, t2, t3, t4, t5) + } +} + +// Partial5 returns new function that, when called, has its first argument set to the provided value +func Partial5[T1, T2, T3, T4, T5, T6, R any](f func(T1, T2, T3, T4, T5, T6) R, arg1 T1) func(T2, T3, T4, T5, T6) R { + return func(t2 T2, t3 T3, t4 T4, t5 T5, t6 T6) R { + return f(arg1, t2, t3, t4, t5, t6) + } +} diff --git a/vendor/github.com/samber/lo/intersect.go b/vendor/github.com/samber/lo/intersect.go new file mode 100644 index 0000000000..cf6cab3d13 --- /dev/null +++ b/vendor/github.com/samber/lo/intersect.go @@ -0,0 +1,185 @@ +package lo + +// Contains returns true if an element is present in a collection. +func Contains[T comparable](collection []T, element T) bool { + for _, item := range collection { + if item == element { + return true + } + } + + return false +} + +// ContainsBy returns true if predicate function return true. +func ContainsBy[T any](collection []T, predicate func(item T) bool) bool { + for _, item := range collection { + if predicate(item) { + return true + } + } + + return false +} + +// Every returns true if all elements of a subset are contained into a collection or if the subset is empty. 
+func Every[T comparable](collection []T, subset []T) bool { + for _, elem := range subset { + if !Contains(collection, elem) { + return false + } + } + + return true +} + +// EveryBy returns true if the predicate returns true for all of the elements in the collection or if the collection is empty. +func EveryBy[T any](collection []T, predicate func(item T) bool) bool { + for _, v := range collection { + if !predicate(v) { + return false + } + } + + return true +} + +// Some returns true if at least 1 element of a subset is contained into a collection. +// If the subset is empty Some returns false. +func Some[T comparable](collection []T, subset []T) bool { + for _, elem := range subset { + if Contains(collection, elem) { + return true + } + } + + return false +} + +// SomeBy returns true if the predicate returns true for any of the elements in the collection. +// If the collection is empty SomeBy returns false. +func SomeBy[T any](collection []T, predicate func(item T) bool) bool { + for _, v := range collection { + if predicate(v) { + return true + } + } + + return false +} + +// None returns true if no element of a subset are contained into a collection or if the subset is empty. +func None[T comparable](collection []T, subset []T) bool { + for _, elem := range subset { + if Contains(collection, elem) { + return false + } + } + + return true +} + +// NoneBy returns true if the predicate returns true for none of the elements in the collection or if the collection is empty. +func NoneBy[T any](collection []T, predicate func(item T) bool) bool { + for _, v := range collection { + if predicate(v) { + return false + } + } + + return true +} + +// Intersect returns the intersection between two collections. 
+func Intersect[T comparable](list1 []T, list2 []T) []T { + result := []T{} + seen := map[T]struct{}{} + + for _, elem := range list1 { + seen[elem] = struct{}{} + } + + for _, elem := range list2 { + if _, ok := seen[elem]; ok { + result = append(result, elem) + } + } + + return result +} + +// Difference returns the difference between two collections. +// The first value is the collection of element absent of list2. +// The second value is the collection of element absent of list1. +func Difference[T comparable](list1 []T, list2 []T) ([]T, []T) { + left := []T{} + right := []T{} + + seenLeft := map[T]struct{}{} + seenRight := map[T]struct{}{} + + for _, elem := range list1 { + seenLeft[elem] = struct{}{} + } + + for _, elem := range list2 { + seenRight[elem] = struct{}{} + } + + for _, elem := range list1 { + if _, ok := seenRight[elem]; !ok { + left = append(left, elem) + } + } + + for _, elem := range list2 { + if _, ok := seenLeft[elem]; !ok { + right = append(right, elem) + } + } + + return left, right +} + +// Union returns all distinct elements from given collections. +// result returns will not change the order of elements relatively. +func Union[T comparable](lists ...[]T) []T { + result := []T{} + seen := map[T]struct{}{} + + for _, list := range lists { + for _, e := range list { + if _, ok := seen[e]; !ok { + seen[e] = struct{}{} + result = append(result, e) + } + } + } + + return result +} + +// Without returns slice excluding all given values. +func Without[T comparable](collection []T, exclude ...T) []T { + result := make([]T, 0, len(collection)) + for _, e := range collection { + if !Contains(exclude, e) { + result = append(result, e) + } + } + return result +} + +// WithoutEmpty returns slice excluding empty values. 
+func WithoutEmpty[T comparable](collection []T) []T { + var empty T + + result := make([]T, 0, len(collection)) + for _, e := range collection { + if e != empty { + result = append(result, e) + } + } + + return result +} diff --git a/vendor/github.com/samber/lo/map.go b/vendor/github.com/samber/lo/map.go new file mode 100644 index 0000000000..9c0ac4826b --- /dev/null +++ b/vendor/github.com/samber/lo/map.go @@ -0,0 +1,224 @@ +package lo + +// Keys creates an array of the map keys. +// Play: https://go.dev/play/p/Uu11fHASqrU +func Keys[K comparable, V any](in map[K]V) []K { + result := make([]K, 0, len(in)) + + for k := range in { + result = append(result, k) + } + + return result +} + +// Values creates an array of the map values. +// Play: https://go.dev/play/p/nnRTQkzQfF6 +func Values[K comparable, V any](in map[K]V) []V { + result := make([]V, 0, len(in)) + + for _, v := range in { + result = append(result, v) + } + + return result +} + +// ValueOr returns the value of the given key or the fallback value if the key is not present. +// Play: https://go.dev/play/p/bAq9mHErB4V +func ValueOr[K comparable, V any](in map[K]V, key K, fallback V) V { + if v, ok := in[key]; ok { + return v + } + return fallback +} + +// PickBy returns same map type filtered by given predicate. +// Play: https://go.dev/play/p/kdg8GR_QMmf +func PickBy[K comparable, V any](in map[K]V, predicate func(key K, value V) bool) map[K]V { + r := map[K]V{} + for k, v := range in { + if predicate(k, v) { + r[k] = v + } + } + return r +} + +// PickByKeys returns same map type filtered by given keys. +// Play: https://go.dev/play/p/R1imbuci9qU +func PickByKeys[K comparable, V any](in map[K]V, keys []K) map[K]V { + r := map[K]V{} + for k, v := range in { + if Contains(keys, k) { + r[k] = v + } + } + return r +} + +// PickByValues returns same map type filtered by given values. 
+// Play: https://go.dev/play/p/1zdzSvbfsJc +func PickByValues[K comparable, V comparable](in map[K]V, values []V) map[K]V { + r := map[K]V{} + for k, v := range in { + if Contains(values, v) { + r[k] = v + } + } + return r +} + +// OmitBy returns same map type filtered by given predicate. +// Play: https://go.dev/play/p/EtBsR43bdsd +func OmitBy[K comparable, V any](in map[K]V, predicate func(key K, value V) bool) map[K]V { + r := map[K]V{} + for k, v := range in { + if !predicate(k, v) { + r[k] = v + } + } + return r +} + +// OmitByKeys returns same map type filtered by given keys. +// Play: https://go.dev/play/p/t1QjCrs-ysk +func OmitByKeys[K comparable, V any](in map[K]V, keys []K) map[K]V { + r := map[K]V{} + for k, v := range in { + if !Contains(keys, k) { + r[k] = v + } + } + return r +} + +// OmitByValues returns same map type filtered by given values. +// Play: https://go.dev/play/p/9UYZi-hrs8j +func OmitByValues[K comparable, V comparable](in map[K]V, values []V) map[K]V { + r := map[K]V{} + for k, v := range in { + if !Contains(values, v) { + r[k] = v + } + } + return r +} + +// Entries transforms a map into array of key/value pairs. +// Play: +func Entries[K comparable, V any](in map[K]V) []Entry[K, V] { + entries := make([]Entry[K, V], 0, len(in)) + + for k, v := range in { + entries = append(entries, Entry[K, V]{ + Key: k, + Value: v, + }) + } + + return entries +} + +// ToPairs transforms a map into array of key/value pairs. +// Alias of Entries(). +// Play: https://go.dev/play/p/3Dhgx46gawJ +func ToPairs[K comparable, V any](in map[K]V) []Entry[K, V] { + return Entries(in) +} + +// FromEntries transforms an array of key/value pairs into a map. +// Play: https://go.dev/play/p/oIr5KHFGCEN +func FromEntries[K comparable, V any](entries []Entry[K, V]) map[K]V { + out := make(map[K]V, len(entries)) + + for _, v := range entries { + out[v.Key] = v.Value + } + + return out +} + +// FromPairs transforms an array of key/value pairs into a map. 
+// Alias of FromEntries(). +// Play: https://go.dev/play/p/oIr5KHFGCEN +func FromPairs[K comparable, V any](entries []Entry[K, V]) map[K]V { + return FromEntries(entries) +} + +// Invert creates a map composed of the inverted keys and values. If map +// contains duplicate values, subsequent values overwrite property assignments +// of previous values. +// Play: https://go.dev/play/p/rFQ4rak6iA1 +func Invert[K comparable, V comparable](in map[K]V) map[V]K { + out := make(map[V]K, len(in)) + + for k, v := range in { + out[v] = k + } + + return out +} + +// Assign merges multiple maps from left to right. +// Play: https://go.dev/play/p/VhwfJOyxf5o +func Assign[K comparable, V any](maps ...map[K]V) map[K]V { + out := map[K]V{} + + for _, m := range maps { + for k, v := range m { + out[k] = v + } + } + + return out +} + +// MapKeys manipulates a map keys and transforms it to a map of another type. +// Play: https://go.dev/play/p/9_4WPIqOetJ +func MapKeys[K comparable, V any, R comparable](in map[K]V, iteratee func(value V, key K) R) map[R]V { + result := make(map[R]V, len(in)) + + for k, v := range in { + result[iteratee(v, k)] = v + } + + return result +} + +// MapValues manipulates a map values and transforms it to a map of another type. +// Play: https://go.dev/play/p/T_8xAfvcf0W +func MapValues[K comparable, V any, R any](in map[K]V, iteratee func(value V, key K) R) map[K]R { + result := make(map[K]R, len(in)) + + for k, v := range in { + result[k] = iteratee(v, k) + } + + return result +} + +// MapEntries manipulates a map entries and transforms it to a map of another type. 
+// Play: https://go.dev/play/p/VuvNQzxKimT +func MapEntries[K1 comparable, V1 any, K2 comparable, V2 any](in map[K1]V1, iteratee func(key K1, value V1) (K2, V2)) map[K2]V2 { + result := make(map[K2]V2, len(in)) + + for k1, v1 := range in { + k2, v2 := iteratee(k1, v1) + result[k2] = v2 + } + + return result +} + +// MapToSlice transforms a map into a slice based on specific iteratee +// Play: https://go.dev/play/p/ZuiCZpDt6LD +func MapToSlice[K comparable, V any, R any](in map[K]V, iteratee func(key K, value V) R) []R { + result := make([]R, 0, len(in)) + + for k, v := range in { + result = append(result, iteratee(k, v)) + } + + return result +} diff --git a/vendor/github.com/samber/lo/math.go b/vendor/github.com/samber/lo/math.go new file mode 100644 index 0000000000..9dce28cf83 --- /dev/null +++ b/vendor/github.com/samber/lo/math.go @@ -0,0 +1,84 @@ +package lo + +import "golang.org/x/exp/constraints" + +// Range creates an array of numbers (positive and/or negative) with given length. +// Play: https://go.dev/play/p/0r6VimXAi9H +func Range(elementNum int) []int { + length := If(elementNum < 0, -elementNum).Else(elementNum) + result := make([]int, length) + step := If(elementNum < 0, -1).Else(1) + for i, j := 0, 0; i < length; i, j = i+1, j+step { + result[i] = j + } + return result +} + +// RangeFrom creates an array of numbers from start with specified length. +// Play: https://go.dev/play/p/0r6VimXAi9H +func RangeFrom[T constraints.Integer | constraints.Float](start T, elementNum int) []T { + length := If(elementNum < 0, -elementNum).Else(elementNum) + result := make([]T, length) + step := If(elementNum < 0, -1).Else(1) + for i, j := 0, start; i < length; i, j = i+1, j+T(step) { + result[i] = j + } + return result +} + +// RangeWithSteps creates an array of numbers (positive and/or negative) progressing from start up to, but not including end. +// step set to zero will return empty array. 
+// Play: https://go.dev/play/p/0r6VimXAi9H +func RangeWithSteps[T constraints.Integer | constraints.Float](start, end, step T) []T { + result := []T{} + if start == end || step == 0 { + return result + } + if start < end { + if step < 0 { + return result + } + for i := start; i < end; i += step { + result = append(result, i) + } + return result + } + if step > 0 { + return result + } + for i := start; i > end; i += step { + result = append(result, i) + } + return result +} + +// Clamp clamps number within the inclusive lower and upper bounds. +// Play: https://go.dev/play/p/RU4lJNC2hlI +func Clamp[T constraints.Ordered](value T, min T, max T) T { + if value < min { + return min + } else if value > max { + return max + } + return value +} + +// Sum sums the values in a collection. If collection is empty 0 is returned. +// Play: https://go.dev/play/p/upfeJVqs4Bt +func Sum[T constraints.Float | constraints.Integer | constraints.Complex](collection []T) T { + var sum T = 0 + for _, val := range collection { + sum += val + } + return sum +} + +// SumBy summarizes the values in a collection using the given return value from the iteration function. If collection is empty 0 is returned. 
+// Play: https://go.dev/play/p/Dz_a_7jN_ca +func SumBy[T any, R constraints.Float | constraints.Integer | constraints.Complex](collection []T, iteratee func(item T) R) R { + var sum R = 0 + for _, item := range collection { + sum = sum + iteratee(item) + } + return sum +} diff --git a/vendor/github.com/samber/lo/retry.go b/vendor/github.com/samber/lo/retry.go new file mode 100644 index 0000000000..c3c264fff9 --- /dev/null +++ b/vendor/github.com/samber/lo/retry.go @@ -0,0 +1,290 @@ +package lo + +import ( + "sync" + "time" +) + +type debounce struct { + after time.Duration + mu *sync.Mutex + timer *time.Timer + done bool + callbacks []func() +} + +func (d *debounce) reset() { + d.mu.Lock() + defer d.mu.Unlock() + + if d.done { + return + } + + if d.timer != nil { + d.timer.Stop() + } + + d.timer = time.AfterFunc(d.after, func() { + for _, f := range d.callbacks { + f() + } + }) +} + +func (d *debounce) cancel() { + d.mu.Lock() + defer d.mu.Unlock() + + if d.timer != nil { + d.timer.Stop() + d.timer = nil + } + + d.done = true +} + +// NewDebounce creates a debounced instance that delays invoking functions given until after wait milliseconds have elapsed. 
+// Play: https://go.dev/play/p/mz32VMK2nqe +func NewDebounce(duration time.Duration, f ...func()) (func(), func()) { + d := &debounce{ + after: duration, + mu: new(sync.Mutex), + timer: nil, + done: false, + callbacks: f, + } + + return func() { + d.reset() + }, d.cancel +} + +type debounceByItem struct { + mu *sync.Mutex + timer *time.Timer + count int +} + +type debounceBy[T comparable] struct { + after time.Duration + mu *sync.Mutex + items map[T]*debounceByItem + callbacks []func(key T, count int) +} + +func (d *debounceBy[T]) reset(key T) { + d.mu.Lock() + if _, ok := d.items[key]; !ok { + d.items[key] = &debounceByItem{ + mu: new(sync.Mutex), + timer: nil, + } + } + + item := d.items[key] + + d.mu.Unlock() + + item.mu.Lock() + defer item.mu.Unlock() + + item.count++ + + if item.timer != nil { + item.timer.Stop() + } + + item.timer = time.AfterFunc(d.after, func() { + item.mu.Lock() + count := item.count + item.count = 0 + item.mu.Unlock() + + for _, f := range d.callbacks { + f(key, count) + } + + }) +} + +func (d *debounceBy[T]) cancel(key T) { + d.mu.Lock() + defer d.mu.Unlock() + + if item, ok := d.items[key]; ok { + item.mu.Lock() + + if item.timer != nil { + item.timer.Stop() + item.timer = nil + } + + item.mu.Unlock() + + delete(d.items, key) + } +} + +// NewDebounceBy creates a debounced instance for each distinct key, that delays invoking functions given until after wait milliseconds have elapsed. +// Play: https://go.dev/play/p/d3Vpt6pxhY8 +func NewDebounceBy[T comparable](duration time.Duration, f ...func(key T, count int)) (func(key T), func(key T)) { + d := &debounceBy[T]{ + after: duration, + mu: new(sync.Mutex), + items: map[T]*debounceByItem{}, + callbacks: f, + } + + return func(key T) { + d.reset(key) + }, d.cancel +} + +// Attempt invokes a function N times until it returns valid output. Returning either the caught error or nil. When first argument is less than `1`, the function runs until a successful response is returned. 
+// Play: https://go.dev/play/p/3ggJZ2ZKcMj +func Attempt(maxIteration int, f func(index int) error) (int, error) { + var err error + + for i := 0; maxIteration <= 0 || i < maxIteration; i++ { + // for retries >= 0 { + err = f(i) + if err == nil { + return i + 1, nil + } + } + + return maxIteration, err +} + +// AttemptWithDelay invokes a function N times until it returns valid output, +// with a pause between each call. Returning either the caught error or nil. +// When first argument is less than `1`, the function runs until a successful +// response is returned. +// Play: https://go.dev/play/p/tVs6CygC7m1 +func AttemptWithDelay(maxIteration int, delay time.Duration, f func(index int, duration time.Duration) error) (int, time.Duration, error) { + var err error + + start := time.Now() + + for i := 0; maxIteration <= 0 || i < maxIteration; i++ { + err = f(i, time.Since(start)) + if err == nil { + return i + 1, time.Since(start), nil + } + + if maxIteration <= 0 || i+1 < maxIteration { + time.Sleep(delay) + } + } + + return maxIteration, time.Since(start), err +} + +// AttemptWhile invokes a function N times until it returns valid output. +// Returning either the caught error or nil, and along with a bool value to identify +// whether it needs invoke function continuously. It will terminate the invoke +// immediately if second bool value is returned with falsy value. When first +// argument is less than `1`, the function runs until a successful response is +// returned. 
+func AttemptWhile(maxIteration int, f func(int) (error, bool)) (int, error) { + var err error + var shouldContinueInvoke bool + + for i := 0; maxIteration <= 0 || i < maxIteration; i++ { + // for retries >= 0 { + err, shouldContinueInvoke = f(i) + if !shouldContinueInvoke { // if shouldContinueInvoke is false, then return immediately + return i + 1, err + } + if err == nil { + return i + 1, nil + } + } + + return maxIteration, err +} + +// AttemptWhileWithDelay invokes a function N times until it returns valid output, +// with a pause between each call. Returning either the caught error or nil, and along +// with a bool value to identify whether it needs to invoke function continuously. +// It will terminate the invoke immediately if second bool value is returned with falsy +// value. When first argument is less than `1`, the function runs until a successful +// response is returned. +func AttemptWhileWithDelay(maxIteration int, delay time.Duration, f func(int, time.Duration) (error, bool)) (int, time.Duration, error) { + var err error + var shouldContinueInvoke bool + + start := time.Now() + + for i := 0; maxIteration <= 0 || i < maxIteration; i++ { + err, shouldContinueInvoke = f(i, time.Since(start)) + if !shouldContinueInvoke { // if shouldContinueInvoke is false, then return immediately + return i + 1, time.Since(start), err + } + if err == nil { + return i + 1, time.Since(start), nil + } + + if maxIteration <= 0 || i+1 < maxIteration { + time.Sleep(delay) + } + } + + return maxIteration, time.Since(start), err +} + +type transactionStep[T any] struct { + exec func(T) (T, error) + onRollback func(T) T +} + +// NewTransaction instanciate a new transaction. +func NewTransaction[T any]() *Transaction[T] { + return &Transaction[T]{ + steps: []transactionStep[T]{}, + } +} + +// Transaction implements a Saga pattern +type Transaction[T any] struct { + steps []transactionStep[T] +} + +// Then adds a step to the chain of callbacks. It returns the same Transaction. 
+func (t *Transaction[T]) Then(exec func(T) (T, error), onRollback func(T) T) *Transaction[T] { + t.steps = append(t.steps, transactionStep[T]{ + exec: exec, + onRollback: onRollback, + }) + + return t +} + +// Process runs the Transaction steps and rollbacks in case of errors. +func (t *Transaction[T]) Process(state T) (T, error) { + var i int + var err error + + for i < len(t.steps) { + state, err = t.steps[i].exec(state) + if err != nil { + break + } + + i++ + } + + if err == nil { + return state, nil + } + + for i > 0 { + i-- + state = t.steps[i].onRollback(state) + } + + return state, err +} + +// throttle ? diff --git a/vendor/github.com/samber/lo/slice.go b/vendor/github.com/samber/lo/slice.go new file mode 100644 index 0000000000..49c991f894 --- /dev/null +++ b/vendor/github.com/samber/lo/slice.go @@ -0,0 +1,594 @@ +package lo + +import ( + "math/rand" + + "golang.org/x/exp/constraints" +) + +// Filter iterates over elements of collection, returning an array of all elements predicate returns truthy for. +// Play: https://go.dev/play/p/Apjg3WeSi7K +func Filter[V any](collection []V, predicate func(item V, index int) bool) []V { + result := make([]V, 0, len(collection)) + + for i, item := range collection { + if predicate(item, i) { + result = append(result, item) + } + } + + return result +} + +// Map manipulates a slice and transforms it to a slice of another type. +// Play: https://go.dev/play/p/OkPcYAhBo0D +func Map[T any, R any](collection []T, iteratee func(item T, index int) R) []R { + result := make([]R, len(collection)) + + for i, item := range collection { + result[i] = iteratee(item, i) + } + + return result +} + +// FilterMap returns a slice which obtained after both filtering and mapping using the given callback function. +// The callback function should return two values: +// - the result of the mapping operation and +// - whether the result element should be included or not. 
+// +// Play: https://go.dev/play/p/-AuYXfy7opz +func FilterMap[T any, R any](collection []T, callback func(item T, index int) (R, bool)) []R { + result := []R{} + + for i, item := range collection { + if r, ok := callback(item, i); ok { + result = append(result, r) + } + } + + return result +} + +// FlatMap manipulates a slice and transforms and flattens it to a slice of another type. +// The transform function can either return a slice or a `nil`, and in the `nil` case +// no value is added to the final slice. +// Play: https://go.dev/play/p/YSoYmQTA8-U +func FlatMap[T any, R any](collection []T, iteratee func(item T, index int) []R) []R { + result := make([]R, 0, len(collection)) + + for i, item := range collection { + result = append(result, iteratee(item, i)...) + } + + return result +} + +// Reduce reduces collection to a value which is the accumulated result of running each element in collection +// through accumulator, where each successive invocation is supplied the return value of the previous. +// Play: https://go.dev/play/p/R4UHXZNaaUG +func Reduce[T any, R any](collection []T, accumulator func(agg R, item T, index int) R, initial R) R { + for i, item := range collection { + initial = accumulator(initial, item, i) + } + + return initial +} + +// ReduceRight helper is like Reduce except that it iterates over elements of collection from right to left. +// Play: https://go.dev/play/p/Fq3W70l7wXF +func ReduceRight[T any, R any](collection []T, accumulator func(agg R, item T, index int) R, initial R) R { + for i := len(collection) - 1; i >= 0; i-- { + initial = accumulator(initial, collection[i], i) + } + + return initial +} + +// ForEach iterates over elements of collection and invokes iteratee for each element. 
+// Play: https://go.dev/play/p/oofyiUPRf8t +func ForEach[T any](collection []T, iteratee func(item T, index int)) { + for i, item := range collection { + iteratee(item, i) + } +} + +// Times invokes the iteratee n times, returning an array of the results of each invocation. +// The iteratee is invoked with index as argument. +// Play: https://go.dev/play/p/vgQj3Glr6lT +func Times[T any](count int, iteratee func(index int) T) []T { + result := make([]T, count) + + for i := 0; i < count; i++ { + result[i] = iteratee(i) + } + + return result +} + +// Uniq returns a duplicate-free version of an array, in which only the first occurrence of each element is kept. +// The order of result values is determined by the order they occur in the array. +// Play: https://go.dev/play/p/DTzbeXZ6iEN +func Uniq[T comparable](collection []T) []T { + result := make([]T, 0, len(collection)) + seen := make(map[T]struct{}, len(collection)) + + for _, item := range collection { + if _, ok := seen[item]; ok { + continue + } + + seen[item] = struct{}{} + result = append(result, item) + } + + return result +} + +// UniqBy returns a duplicate-free version of an array, in which only the first occurrence of each element is kept. +// The order of result values is determined by the order they occur in the array. It accepts `iteratee` which is +// invoked for each element in array to generate the criterion by which uniqueness is computed. +// Play: https://go.dev/play/p/g42Z3QSb53u +func UniqBy[T any, U comparable](collection []T, iteratee func(item T) U) []T { + result := make([]T, 0, len(collection)) + seen := make(map[U]struct{}, len(collection)) + + for _, item := range collection { + key := iteratee(item) + + if _, ok := seen[key]; ok { + continue + } + + seen[key] = struct{}{} + result = append(result, item) + } + + return result +} + +// GroupBy returns an object composed of keys generated from the results of running each element of collection through iteratee. 
+// Play: https://go.dev/play/p/XnQBd_v6brd +func GroupBy[T any, U comparable](collection []T, iteratee func(item T) U) map[U][]T { + result := map[U][]T{} + + for _, item := range collection { + key := iteratee(item) + + result[key] = append(result[key], item) + } + + return result +} + +// Chunk returns an array of elements split into groups the length of size. If array can't be split evenly, +// the final chunk will be the remaining elements. +// Play: https://go.dev/play/p/EeKl0AuTehH +func Chunk[T any](collection []T, size int) [][]T { + if size <= 0 { + panic("Second parameter must be greater than 0") + } + + chunksNum := len(collection) / size + if len(collection)%size != 0 { + chunksNum += 1 + } + + result := make([][]T, 0, chunksNum) + + for i := 0; i < chunksNum; i++ { + last := (i + 1) * size + if last > len(collection) { + last = len(collection) + } + result = append(result, collection[i*size:last]) + } + + return result +} + +// PartitionBy returns an array of elements split into groups. The order of grouped values is +// determined by the order they occur in collection. The grouping is generated from the results +// of running each element of collection through iteratee. +// Play: https://go.dev/play/p/NfQ_nGjkgXW +func PartitionBy[T any, K comparable](collection []T, iteratee func(item T) K) [][]T { + result := [][]T{} + seen := map[K]int{} + + for _, item := range collection { + key := iteratee(item) + + resultIndex, ok := seen[key] + if !ok { + resultIndex = len(result) + seen[key] = resultIndex + result = append(result, []T{}) + } + + result[resultIndex] = append(result[resultIndex], item) + } + + return result + + // unordered: + // groups := GroupBy[T, K](collection, iteratee) + // return Values[K, []T](groups) +} + +// Flatten returns an array a single level deep. 
+// Play: https://go.dev/play/p/rbp9ORaMpjw +func Flatten[T any](collection [][]T) []T { + totalLen := 0 + for i := range collection { + totalLen += len(collection[i]) + } + + result := make([]T, 0, totalLen) + for i := range collection { + result = append(result, collection[i]...) + } + + return result +} + +// Interleave round-robin alternating input slices and sequentially appending value at index into result +// Play: https://go.dev/play/p/DDhlwrShbwe +func Interleave[T any](collections ...[]T) []T { + if len(collections) == 0 { + return []T{} + } + + maxSize := 0 + totalSize := 0 + for _, c := range collections { + size := len(c) + totalSize += size + if size > maxSize { + maxSize = size + } + } + + if maxSize == 0 { + return []T{} + } + + result := make([]T, totalSize) + + resultIdx := 0 + for i := 0; i < maxSize; i++ { + for j := range collections { + if len(collections[j])-1 < i { + continue + } + + result[resultIdx] = collections[j][i] + resultIdx++ + } + } + + return result +} + +// Shuffle returns an array of shuffled values. Uses the Fisher-Yates shuffle algorithm. +// Play: https://go.dev/play/p/Qp73bnTDnc7 +func Shuffle[T any](collection []T) []T { + rand.Shuffle(len(collection), func(i, j int) { + collection[i], collection[j] = collection[j], collection[i] + }) + + return collection +} + +// Reverse reverses array so that the first element becomes the last, the second element becomes the second to last, and so on. +// Play: https://go.dev/play/p/fhUMLvZ7vS6 +func Reverse[T any](collection []T) []T { + length := len(collection) + half := length / 2 + + for i := 0; i < half; i = i + 1 { + j := length - 1 - i + collection[i], collection[j] = collection[j], collection[i] + } + + return collection +} + +// Fill fills elements of array with `initial` value. 
+// Play: https://go.dev/play/p/VwR34GzqEub +func Fill[T Clonable[T]](collection []T, initial T) []T { + result := make([]T, 0, len(collection)) + + for range collection { + result = append(result, initial.Clone()) + } + + return result +} + +// Repeat builds a slice with N copies of initial value. +// Play: https://go.dev/play/p/g3uHXbmc3b6 +func Repeat[T Clonable[T]](count int, initial T) []T { + result := make([]T, 0, count) + + for i := 0; i < count; i++ { + result = append(result, initial.Clone()) + } + + return result +} + +// RepeatBy builds a slice with values returned by N calls of callback. +// Play: https://go.dev/play/p/ozZLCtX_hNU +func RepeatBy[T any](count int, predicate func(index int) T) []T { + result := make([]T, 0, count) + + for i := 0; i < count; i++ { + result = append(result, predicate(i)) + } + + return result +} + +// KeyBy transforms a slice or an array of structs to a map based on a pivot callback. +// Play: https://go.dev/play/p/mdaClUAT-zZ +func KeyBy[K comparable, V any](collection []V, iteratee func(item V) K) map[K]V { + result := make(map[K]V, len(collection)) + + for _, v := range collection { + k := iteratee(v) + result[k] = v + } + + return result +} + +// Associate returns a map containing key-value pairs provided by transform function applied to elements of the given slice. +// If any of two pairs would have the same key the last one gets added to the map. +// The order of keys in returned map is not specified and is not guaranteed to be the same from the original array. +// Play: https://go.dev/play/p/WHa2CfMO3Lr +func Associate[T any, K comparable, V any](collection []T, transform func(item T) (K, V)) map[K]V { + result := make(map[K]V, len(collection)) + + for _, t := range collection { + k, v := transform(t) + result[k] = v + } + + return result +} + +// SliceToMap returns a map containing key-value pairs provided by transform function applied to elements of the given slice. 
+// If any of two pairs would have the same key the last one gets added to the map. +// The order of keys in returned map is not specified and is not guaranteed to be the same from the original array. +// Alias of Associate(). +// Play: https://go.dev/play/p/WHa2CfMO3Lr +func SliceToMap[T any, K comparable, V any](collection []T, transform func(item T) (K, V)) map[K]V { + return Associate(collection, transform) +} + +// Drop drops n elements from the beginning of a slice or array. +// Play: https://go.dev/play/p/JswS7vXRJP2 +func Drop[T any](collection []T, n int) []T { + if len(collection) <= n { + return make([]T, 0) + } + + result := make([]T, 0, len(collection)-n) + + return append(result, collection[n:]...) +} + +// DropRight drops n elements from the end of a slice or array. +// Play: https://go.dev/play/p/GG0nXkSJJa3 +func DropRight[T any](collection []T, n int) []T { + if len(collection) <= n { + return []T{} + } + + result := make([]T, 0, len(collection)-n) + return append(result, collection[:len(collection)-n]...) +} + +// DropWhile drops elements from the beginning of a slice or array while the predicate returns true. +// Play: https://go.dev/play/p/7gBPYw2IK16 +func DropWhile[T any](collection []T, predicate func(item T) bool) []T { + i := 0 + for ; i < len(collection); i++ { + if !predicate(collection[i]) { + break + } + } + + result := make([]T, 0, len(collection)-i) + return append(result, collection[i:]...) +} + +// DropRightWhile drops elements from the end of a slice or array while the predicate returns true. +// Play: https://go.dev/play/p/3-n71oEC0Hz +func DropRightWhile[T any](collection []T, predicate func(item T) bool) []T { + i := len(collection) - 1 + for ; i >= 0; i-- { + if !predicate(collection[i]) { + break + } + } + + result := make([]T, 0, i+1) + return append(result, collection[:i+1]...) +} + +// Reject is the opposite of Filter, this method returns the elements of collection that predicate does not return truthy for. 
+// Play: https://go.dev/play/p/YkLMODy1WEL +func Reject[V any](collection []V, predicate func(item V, index int) bool) []V { + result := []V{} + + for i, item := range collection { + if !predicate(item, i) { + result = append(result, item) + } + } + + return result +} + +// Count counts the number of elements in the collection that compare equal to value. +// Play: https://go.dev/play/p/Y3FlK54yveC +func Count[T comparable](collection []T, value T) (count int) { + for _, item := range collection { + if item == value { + count++ + } + } + + return count +} + +// CountBy counts the number of elements in the collection for which predicate is true. +// Play: https://go.dev/play/p/ByQbNYQQi4X +func CountBy[T any](collection []T, predicate func(item T) bool) (count int) { + for _, item := range collection { + if predicate(item) { + count++ + } + } + + return count +} + +// CountValues counts the number of each element in the collection. +// Play: https://go.dev/play/p/-p-PyLT4dfy +func CountValues[T comparable](collection []T) map[T]int { + result := make(map[T]int) + + for _, item := range collection { + result[item]++ + } + + return result +} + +// CountValuesBy counts the number of each element return from mapper function. +// Is equivalent to chaining lo.Map and lo.CountValues. +// Play: https://go.dev/play/p/2U0dG1SnOmS +func CountValuesBy[T any, U comparable](collection []T, mapper func(item T) U) map[U]int { + result := make(map[U]int) + + for _, item := range collection { + result[mapper(item)]++ + } + + return result +} + +// Subset returns a copy of a slice from `offset` up to `length` elements. Like `slice[start:start+length]`, but does not panic on overflow. 
+// Play: https://go.dev/play/p/tOQu1GhFcog +func Subset[T any](collection []T, offset int, length uint) []T { + size := len(collection) + + if offset < 0 { + offset = size + offset + if offset < 0 { + offset = 0 + } + } + + if offset > size { + return []T{} + } + + if length > uint(size)-uint(offset) { + length = uint(size - offset) + } + + return collection[offset : offset+int(length)] +} + +// Slice returns a copy of a slice from `start` up to, but not including `end`. Like `slice[start:end]`, but does not panic on overflow. +// Play: https://go.dev/play/p/8XWYhfMMA1h +func Slice[T any](collection []T, start int, end int) []T { + size := len(collection) + + if start >= end { + return []T{} + } + + if start > size { + start = size + } + if start < 0 { + start = 0 + } + + if end > size { + end = size + } + if end < 0 { + end = 0 + } + + return collection[start:end] +} + +// Replace returns a copy of the slice with the first n non-overlapping instances of old replaced by new. +// Play: https://go.dev/play/p/XfPzmf9gql6 +func Replace[T comparable](collection []T, old T, new T, n int) []T { + result := make([]T, len(collection)) + copy(result, collection) + + for i := range result { + if result[i] == old && n != 0 { + result[i] = new + n-- + } + } + + return result +} + +// ReplaceAll returns a copy of the slice with all non-overlapping instances of old replaced by new. +// Play: https://go.dev/play/p/a9xZFUHfYcV +func ReplaceAll[T comparable](collection []T, old T, new T) []T { + return Replace(collection, old, new, -1) +} + +// Compact returns a slice of all non-zero elements. +// Play: https://go.dev/play/p/tXiy-iK6PAc +func Compact[T comparable](collection []T) []T { + var zero T + + result := make([]T, 0, len(collection)) + + for _, item := range collection { + if item != zero { + result = append(result, item) + } + } + + return result +} + +// IsSorted checks if a slice is sorted. 
+// Play: https://go.dev/play/p/mc3qR-t4mcx +func IsSorted[T constraints.Ordered](collection []T) bool { + for i := 1; i < len(collection); i++ { + if collection[i-1] > collection[i] { + return false + } + } + + return true +} + +// IsSortedByKey checks if a slice is sorted by iteratee. +// Play: https://go.dev/play/p/wiG6XyBBu49 +func IsSortedByKey[T any, K constraints.Ordered](collection []T, iteratee func(item T) K) bool { + size := len(collection) + + for i := 0; i < size-1; i++ { + if iteratee(collection[i]) > iteratee(collection[i+1]) { + return false + } + } + + return true +} diff --git a/vendor/github.com/samber/lo/string.go b/vendor/github.com/samber/lo/string.go new file mode 100644 index 0000000000..a7a959a395 --- /dev/null +++ b/vendor/github.com/samber/lo/string.go @@ -0,0 +1,96 @@ +package lo + +import ( + "math/rand" + "strings" + "unicode/utf8" +) + +var ( + LowerCaseLettersCharset = []rune("abcdefghijklmnopqrstuvwxyz") + UpperCaseLettersCharset = []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ") + LettersCharset = append(LowerCaseLettersCharset, UpperCaseLettersCharset...) + NumbersCharset = []rune("0123456789") + AlphanumericCharset = append(LettersCharset, NumbersCharset...) + SpecialCharset = []rune("!@#$%^&*()_+-=[]{}|;':\",./<>?") + AllCharset = append(AlphanumericCharset, SpecialCharset...) +) + +// RandomString return a random string. +// Play: https://go.dev/play/p/rRseOQVVum4 +func RandomString(size int, charset []rune) string { + if size <= 0 { + panic("lo.RandomString: Size parameter must be greater than 0") + } + if len(charset) <= 0 { + panic("lo.RandomString: Charset parameter must not be empty") + } + + b := make([]rune, size) + possibleCharactersCount := len(charset) + for i := range b { + b[i] = charset[rand.Intn(possibleCharactersCount)] + } + return string(b) +} + +// Substring return part of a string. 
+// Play: https://go.dev/play/p/TQlxQi82Lu1 +func Substring[T ~string](str T, offset int, length uint) T { + rs := []rune(str) + size := len(rs) + + if offset < 0 { + offset = size + offset + if offset < 0 { + offset = 0 + } + } + + if offset > size { + return Empty[T]() + } + + if length > uint(size)-uint(offset) { + length = uint(size - offset) + } + + return T(strings.Replace(string(rs[offset:offset+int(length)]), "\x00", "", -1)) +} + +// ChunkString returns an array of strings split into groups the length of size. If array can't be split evenly, +// the final chunk will be the remaining elements. +// Play: https://go.dev/play/p/__FLTuJVz54 +func ChunkString[T ~string](str T, size int) []T { + if size <= 0 { + panic("lo.ChunkString: Size parameter must be greater than 0") + } + + if len(str) == 0 { + return []T{""} + } + + if size >= len(str) { + return []T{str} + } + + var chunks []T = make([]T, 0, ((len(str)-1)/size)+1) + currentLen := 0 + currentStart := 0 + for i := range str { + if currentLen == size { + chunks = append(chunks, str[currentStart:i]) + currentLen = 0 + currentStart = i + } + currentLen++ + } + chunks = append(chunks, str[currentStart:]) + return chunks +} + +// RuneLength is an alias to utf8.RuneCountInString which returns the number of runes in string. +// Play: https://go.dev/play/p/tuhgW_lWY8l +func RuneLength(str string) int { + return utf8.RuneCountInString(str) +} diff --git a/vendor/github.com/samber/lo/tuples.go b/vendor/github.com/samber/lo/tuples.go new file mode 100644 index 0000000000..cdddf6afc1 --- /dev/null +++ b/vendor/github.com/samber/lo/tuples.go @@ -0,0 +1,513 @@ +package lo + +// T2 creates a tuple from a list of values. +// Play: https://go.dev/play/p/IllL3ZO4BQm +func T2[A any, B any](a A, b B) Tuple2[A, B] { + return Tuple2[A, B]{A: a, B: b} +} + +// T3 creates a tuple from a list of values. 
+// Play: https://go.dev/play/p/IllL3ZO4BQm +func T3[A any, B any, C any](a A, b B, c C) Tuple3[A, B, C] { + return Tuple3[A, B, C]{A: a, B: b, C: c} +} + +// T4 creates a tuple from a list of values. +// Play: https://go.dev/play/p/IllL3ZO4BQm +func T4[A any, B any, C any, D any](a A, b B, c C, d D) Tuple4[A, B, C, D] { + return Tuple4[A, B, C, D]{A: a, B: b, C: c, D: d} +} + +// T5 creates a tuple from a list of values. +// Play: https://go.dev/play/p/IllL3ZO4BQm +func T5[A any, B any, C any, D any, E any](a A, b B, c C, d D, e E) Tuple5[A, B, C, D, E] { + return Tuple5[A, B, C, D, E]{A: a, B: b, C: c, D: d, E: e} +} + +// T6 creates a tuple from a list of values. +// Play: https://go.dev/play/p/IllL3ZO4BQm +func T6[A any, B any, C any, D any, E any, F any](a A, b B, c C, d D, e E, f F) Tuple6[A, B, C, D, E, F] { + return Tuple6[A, B, C, D, E, F]{A: a, B: b, C: c, D: d, E: e, F: f} +} + +// T7 creates a tuple from a list of values. +// Play: https://go.dev/play/p/IllL3ZO4BQm +func T7[A any, B any, C any, D any, E any, F any, G any](a A, b B, c C, d D, e E, f F, g G) Tuple7[A, B, C, D, E, F, G] { + return Tuple7[A, B, C, D, E, F, G]{A: a, B: b, C: c, D: d, E: e, F: f, G: g} +} + +// T8 creates a tuple from a list of values. +// Play: https://go.dev/play/p/IllL3ZO4BQm +func T8[A any, B any, C any, D any, E any, F any, G any, H any](a A, b B, c C, d D, e E, f F, g G, h H) Tuple8[A, B, C, D, E, F, G, H] { + return Tuple8[A, B, C, D, E, F, G, H]{A: a, B: b, C: c, D: d, E: e, F: f, G: g, H: h} +} + +// T9 creates a tuple from a list of values. +// Play: https://go.dev/play/p/IllL3ZO4BQm +func T9[A any, B any, C any, D any, E any, F any, G any, H any, I any](a A, b B, c C, d D, e E, f F, g G, h H, i I) Tuple9[A, B, C, D, E, F, G, H, I] { + return Tuple9[A, B, C, D, E, F, G, H, I]{A: a, B: b, C: c, D: d, E: e, F: f, G: g, H: h, I: i} +} + +// Unpack2 returns values contained in tuple. 
+// Play: https://go.dev/play/p/xVP_k0kJ96W +func Unpack2[A any, B any](tuple Tuple2[A, B]) (A, B) { + return tuple.A, tuple.B +} + +// Unpack3 returns values contained in tuple. +// Play: https://go.dev/play/p/xVP_k0kJ96W +func Unpack3[A any, B any, C any](tuple Tuple3[A, B, C]) (A, B, C) { + return tuple.A, tuple.B, tuple.C +} + +// Unpack4 returns values contained in tuple. +// Play: https://go.dev/play/p/xVP_k0kJ96W +func Unpack4[A any, B any, C any, D any](tuple Tuple4[A, B, C, D]) (A, B, C, D) { + return tuple.A, tuple.B, tuple.C, tuple.D +} + +// Unpack5 returns values contained in tuple. +// Play: https://go.dev/play/p/xVP_k0kJ96W +func Unpack5[A any, B any, C any, D any, E any](tuple Tuple5[A, B, C, D, E]) (A, B, C, D, E) { + return tuple.A, tuple.B, tuple.C, tuple.D, tuple.E +} + +// Unpack6 returns values contained in tuple. +// Play: https://go.dev/play/p/xVP_k0kJ96W +func Unpack6[A any, B any, C any, D any, E any, F any](tuple Tuple6[A, B, C, D, E, F]) (A, B, C, D, E, F) { + return tuple.A, tuple.B, tuple.C, tuple.D, tuple.E, tuple.F +} + +// Unpack7 returns values contained in tuple. +// Play: https://go.dev/play/p/xVP_k0kJ96W +func Unpack7[A any, B any, C any, D any, E any, F any, G any](tuple Tuple7[A, B, C, D, E, F, G]) (A, B, C, D, E, F, G) { + return tuple.A, tuple.B, tuple.C, tuple.D, tuple.E, tuple.F, tuple.G +} + +// Unpack8 returns values contained in tuple. +// Play: https://go.dev/play/p/xVP_k0kJ96W +func Unpack8[A any, B any, C any, D any, E any, F any, G any, H any](tuple Tuple8[A, B, C, D, E, F, G, H]) (A, B, C, D, E, F, G, H) { + return tuple.A, tuple.B, tuple.C, tuple.D, tuple.E, tuple.F, tuple.G, tuple.H +} + +// Unpack9 returns values contained in tuple. 
+// Play: https://go.dev/play/p/xVP_k0kJ96W +func Unpack9[A any, B any, C any, D any, E any, F any, G any, H any, I any](tuple Tuple9[A, B, C, D, E, F, G, H, I]) (A, B, C, D, E, F, G, H, I) { + return tuple.A, tuple.B, tuple.C, tuple.D, tuple.E, tuple.F, tuple.G, tuple.H, tuple.I +} + +// Zip2 creates a slice of grouped elements, the first of which contains the first elements +// of the given arrays, the second of which contains the second elements of the given arrays, and so on. +// When collections have different size, the Tuple attributes are filled with zero value. +// Play: https://go.dev/play/p/jujaA6GaJTp +func Zip2[A any, B any](a []A, b []B) []Tuple2[A, B] { + size := Max([]int{len(a), len(b)}) + + result := make([]Tuple2[A, B], 0, size) + + for index := 0; index < size; index++ { + _a, _ := Nth(a, index) + _b, _ := Nth(b, index) + + result = append(result, Tuple2[A, B]{ + A: _a, + B: _b, + }) + } + + return result +} + +// Zip3 creates a slice of grouped elements, the first of which contains the first elements +// of the given arrays, the second of which contains the second elements of the given arrays, and so on. +// When collections have different size, the Tuple attributes are filled with zero value. +// Play: https://go.dev/play/p/jujaA6GaJTp +func Zip3[A any, B any, C any](a []A, b []B, c []C) []Tuple3[A, B, C] { + size := Max([]int{len(a), len(b), len(c)}) + + result := make([]Tuple3[A, B, C], 0, size) + + for index := 0; index < size; index++ { + _a, _ := Nth(a, index) + _b, _ := Nth(b, index) + _c, _ := Nth(c, index) + + result = append(result, Tuple3[A, B, C]{ + A: _a, + B: _b, + C: _c, + }) + } + + return result +} + +// Zip4 creates a slice of grouped elements, the first of which contains the first elements +// of the given arrays, the second of which contains the second elements of the given arrays, and so on. +// When collections have different size, the Tuple attributes are filled with zero value. 
+// Play: https://go.dev/play/p/jujaA6GaJTp +func Zip4[A any, B any, C any, D any](a []A, b []B, c []C, d []D) []Tuple4[A, B, C, D] { + size := Max([]int{len(a), len(b), len(c), len(d)}) + + result := make([]Tuple4[A, B, C, D], 0, size) + + for index := 0; index < size; index++ { + _a, _ := Nth(a, index) + _b, _ := Nth(b, index) + _c, _ := Nth(c, index) + _d, _ := Nth(d, index) + + result = append(result, Tuple4[A, B, C, D]{ + A: _a, + B: _b, + C: _c, + D: _d, + }) + } + + return result +} + +// Zip5 creates a slice of grouped elements, the first of which contains the first elements +// of the given arrays, the second of which contains the second elements of the given arrays, and so on. +// When collections have different size, the Tuple attributes are filled with zero value. +// Play: https://go.dev/play/p/jujaA6GaJTp +func Zip5[A any, B any, C any, D any, E any](a []A, b []B, c []C, d []D, e []E) []Tuple5[A, B, C, D, E] { + size := Max([]int{len(a), len(b), len(c), len(d), len(e)}) + + result := make([]Tuple5[A, B, C, D, E], 0, size) + + for index := 0; index < size; index++ { + _a, _ := Nth(a, index) + _b, _ := Nth(b, index) + _c, _ := Nth(c, index) + _d, _ := Nth(d, index) + _e, _ := Nth(e, index) + + result = append(result, Tuple5[A, B, C, D, E]{ + A: _a, + B: _b, + C: _c, + D: _d, + E: _e, + }) + } + + return result +} + +// Zip6 creates a slice of grouped elements, the first of which contains the first elements +// of the given arrays, the second of which contains the second elements of the given arrays, and so on. +// When collections have different size, the Tuple attributes are filled with zero value. 
+// Play: https://go.dev/play/p/jujaA6GaJTp +func Zip6[A any, B any, C any, D any, E any, F any](a []A, b []B, c []C, d []D, e []E, f []F) []Tuple6[A, B, C, D, E, F] { + size := Max([]int{len(a), len(b), len(c), len(d), len(e), len(f)}) + + result := make([]Tuple6[A, B, C, D, E, F], 0, size) + + for index := 0; index < size; index++ { + _a, _ := Nth(a, index) + _b, _ := Nth(b, index) + _c, _ := Nth(c, index) + _d, _ := Nth(d, index) + _e, _ := Nth(e, index) + _f, _ := Nth(f, index) + + result = append(result, Tuple6[A, B, C, D, E, F]{ + A: _a, + B: _b, + C: _c, + D: _d, + E: _e, + F: _f, + }) + } + + return result +} + +// Zip7 creates a slice of grouped elements, the first of which contains the first elements +// of the given arrays, the second of which contains the second elements of the given arrays, and so on. +// When collections have different size, the Tuple attributes are filled with zero value. +// Play: https://go.dev/play/p/jujaA6GaJTp +func Zip7[A any, B any, C any, D any, E any, F any, G any](a []A, b []B, c []C, d []D, e []E, f []F, g []G) []Tuple7[A, B, C, D, E, F, G] { + size := Max([]int{len(a), len(b), len(c), len(d), len(e), len(f), len(g)}) + + result := make([]Tuple7[A, B, C, D, E, F, G], 0, size) + + for index := 0; index < size; index++ { + _a, _ := Nth(a, index) + _b, _ := Nth(b, index) + _c, _ := Nth(c, index) + _d, _ := Nth(d, index) + _e, _ := Nth(e, index) + _f, _ := Nth(f, index) + _g, _ := Nth(g, index) + + result = append(result, Tuple7[A, B, C, D, E, F, G]{ + A: _a, + B: _b, + C: _c, + D: _d, + E: _e, + F: _f, + G: _g, + }) + } + + return result +} + +// Zip8 creates a slice of grouped elements, the first of which contains the first elements +// of the given arrays, the second of which contains the second elements of the given arrays, and so on. +// When collections have different size, the Tuple attributes are filled with zero value. 
+// Play: https://go.dev/play/p/jujaA6GaJTp +func Zip8[A any, B any, C any, D any, E any, F any, G any, H any](a []A, b []B, c []C, d []D, e []E, f []F, g []G, h []H) []Tuple8[A, B, C, D, E, F, G, H] { + size := Max([]int{len(a), len(b), len(c), len(d), len(e), len(f), len(g), len(h)}) + + result := make([]Tuple8[A, B, C, D, E, F, G, H], 0, size) + + for index := 0; index < size; index++ { + _a, _ := Nth(a, index) + _b, _ := Nth(b, index) + _c, _ := Nth(c, index) + _d, _ := Nth(d, index) + _e, _ := Nth(e, index) + _f, _ := Nth(f, index) + _g, _ := Nth(g, index) + _h, _ := Nth(h, index) + + result = append(result, Tuple8[A, B, C, D, E, F, G, H]{ + A: _a, + B: _b, + C: _c, + D: _d, + E: _e, + F: _f, + G: _g, + H: _h, + }) + } + + return result +} + +// Zip9 creates a slice of grouped elements, the first of which contains the first elements +// of the given arrays, the second of which contains the second elements of the given arrays, and so on. +// When collections have different size, the Tuple attributes are filled with zero value. +// Play: https://go.dev/play/p/jujaA6GaJTp +func Zip9[A any, B any, C any, D any, E any, F any, G any, H any, I any](a []A, b []B, c []C, d []D, e []E, f []F, g []G, h []H, i []I) []Tuple9[A, B, C, D, E, F, G, H, I] { + size := Max([]int{len(a), len(b), len(c), len(d), len(e), len(f), len(g), len(h), len(i)}) + + result := make([]Tuple9[A, B, C, D, E, F, G, H, I], 0, size) + + for index := 0; index < size; index++ { + _a, _ := Nth(a, index) + _b, _ := Nth(b, index) + _c, _ := Nth(c, index) + _d, _ := Nth(d, index) + _e, _ := Nth(e, index) + _f, _ := Nth(f, index) + _g, _ := Nth(g, index) + _h, _ := Nth(h, index) + _i, _ := Nth(i, index) + + result = append(result, Tuple9[A, B, C, D, E, F, G, H, I]{ + A: _a, + B: _b, + C: _c, + D: _d, + E: _e, + F: _f, + G: _g, + H: _h, + I: _i, + }) + } + + return result +} + +// Unzip2 accepts an array of grouped elements and creates an array regrouping the elements +// to their pre-zip configuration. 
+// Play: https://go.dev/play/p/ciHugugvaAW +func Unzip2[A any, B any](tuples []Tuple2[A, B]) ([]A, []B) { + size := len(tuples) + r1 := make([]A, 0, size) + r2 := make([]B, 0, size) + + for _, tuple := range tuples { + r1 = append(r1, tuple.A) + r2 = append(r2, tuple.B) + } + + return r1, r2 +} + +// Unzip3 accepts an array of grouped elements and creates an array regrouping the elements +// to their pre-zip configuration. +// Play: https://go.dev/play/p/ciHugugvaAW +func Unzip3[A any, B any, C any](tuples []Tuple3[A, B, C]) ([]A, []B, []C) { + size := len(tuples) + r1 := make([]A, 0, size) + r2 := make([]B, 0, size) + r3 := make([]C, 0, size) + + for _, tuple := range tuples { + r1 = append(r1, tuple.A) + r2 = append(r2, tuple.B) + r3 = append(r3, tuple.C) + } + + return r1, r2, r3 +} + +// Unzip4 accepts an array of grouped elements and creates an array regrouping the elements +// to their pre-zip configuration. +// Play: https://go.dev/play/p/ciHugugvaAW +func Unzip4[A any, B any, C any, D any](tuples []Tuple4[A, B, C, D]) ([]A, []B, []C, []D) { + size := len(tuples) + r1 := make([]A, 0, size) + r2 := make([]B, 0, size) + r3 := make([]C, 0, size) + r4 := make([]D, 0, size) + + for _, tuple := range tuples { + r1 = append(r1, tuple.A) + r2 = append(r2, tuple.B) + r3 = append(r3, tuple.C) + r4 = append(r4, tuple.D) + } + + return r1, r2, r3, r4 +} + +// Unzip5 accepts an array of grouped elements and creates an array regrouping the elements +// to their pre-zip configuration. 
+// Play: https://go.dev/play/p/ciHugugvaAW +func Unzip5[A any, B any, C any, D any, E any](tuples []Tuple5[A, B, C, D, E]) ([]A, []B, []C, []D, []E) { + size := len(tuples) + r1 := make([]A, 0, size) + r2 := make([]B, 0, size) + r3 := make([]C, 0, size) + r4 := make([]D, 0, size) + r5 := make([]E, 0, size) + + for _, tuple := range tuples { + r1 = append(r1, tuple.A) + r2 = append(r2, tuple.B) + r3 = append(r3, tuple.C) + r4 = append(r4, tuple.D) + r5 = append(r5, tuple.E) + } + + return r1, r2, r3, r4, r5 +} + +// Unzip6 accepts an array of grouped elements and creates an array regrouping the elements +// to their pre-zip configuration. +// Play: https://go.dev/play/p/ciHugugvaAW +func Unzip6[A any, B any, C any, D any, E any, F any](tuples []Tuple6[A, B, C, D, E, F]) ([]A, []B, []C, []D, []E, []F) { + size := len(tuples) + r1 := make([]A, 0, size) + r2 := make([]B, 0, size) + r3 := make([]C, 0, size) + r4 := make([]D, 0, size) + r5 := make([]E, 0, size) + r6 := make([]F, 0, size) + + for _, tuple := range tuples { + r1 = append(r1, tuple.A) + r2 = append(r2, tuple.B) + r3 = append(r3, tuple.C) + r4 = append(r4, tuple.D) + r5 = append(r5, tuple.E) + r6 = append(r6, tuple.F) + } + + return r1, r2, r3, r4, r5, r6 +} + +// Unzip7 accepts an array of grouped elements and creates an array regrouping the elements +// to their pre-zip configuration. 
+// Play: https://go.dev/play/p/ciHugugvaAW +func Unzip7[A any, B any, C any, D any, E any, F any, G any](tuples []Tuple7[A, B, C, D, E, F, G]) ([]A, []B, []C, []D, []E, []F, []G) { + size := len(tuples) + r1 := make([]A, 0, size) + r2 := make([]B, 0, size) + r3 := make([]C, 0, size) + r4 := make([]D, 0, size) + r5 := make([]E, 0, size) + r6 := make([]F, 0, size) + r7 := make([]G, 0, size) + + for _, tuple := range tuples { + r1 = append(r1, tuple.A) + r2 = append(r2, tuple.B) + r3 = append(r3, tuple.C) + r4 = append(r4, tuple.D) + r5 = append(r5, tuple.E) + r6 = append(r6, tuple.F) + r7 = append(r7, tuple.G) + } + + return r1, r2, r3, r4, r5, r6, r7 +} + +// Unzip8 accepts an array of grouped elements and creates an array regrouping the elements +// to their pre-zip configuration. +// Play: https://go.dev/play/p/ciHugugvaAW +func Unzip8[A any, B any, C any, D any, E any, F any, G any, H any](tuples []Tuple8[A, B, C, D, E, F, G, H]) ([]A, []B, []C, []D, []E, []F, []G, []H) { + size := len(tuples) + r1 := make([]A, 0, size) + r2 := make([]B, 0, size) + r3 := make([]C, 0, size) + r4 := make([]D, 0, size) + r5 := make([]E, 0, size) + r6 := make([]F, 0, size) + r7 := make([]G, 0, size) + r8 := make([]H, 0, size) + + for _, tuple := range tuples { + r1 = append(r1, tuple.A) + r2 = append(r2, tuple.B) + r3 = append(r3, tuple.C) + r4 = append(r4, tuple.D) + r5 = append(r5, tuple.E) + r6 = append(r6, tuple.F) + r7 = append(r7, tuple.G) + r8 = append(r8, tuple.H) + } + + return r1, r2, r3, r4, r5, r6, r7, r8 +} + +// Unzip9 accepts an array of grouped elements and creates an array regrouping the elements +// to their pre-zip configuration. 
+// Play: https://go.dev/play/p/ciHugugvaAW +func Unzip9[A any, B any, C any, D any, E any, F any, G any, H any, I any](tuples []Tuple9[A, B, C, D, E, F, G, H, I]) ([]A, []B, []C, []D, []E, []F, []G, []H, []I) { + size := len(tuples) + r1 := make([]A, 0, size) + r2 := make([]B, 0, size) + r3 := make([]C, 0, size) + r4 := make([]D, 0, size) + r5 := make([]E, 0, size) + r6 := make([]F, 0, size) + r7 := make([]G, 0, size) + r8 := make([]H, 0, size) + r9 := make([]I, 0, size) + + for _, tuple := range tuples { + r1 = append(r1, tuple.A) + r2 = append(r2, tuple.B) + r3 = append(r3, tuple.C) + r4 = append(r4, tuple.D) + r5 = append(r5, tuple.E) + r6 = append(r6, tuple.F) + r7 = append(r7, tuple.G) + r8 = append(r8, tuple.H) + r9 = append(r9, tuple.I) + } + + return r1, r2, r3, r4, r5, r6, r7, r8, r9 +} diff --git a/vendor/github.com/samber/lo/type_manipulation.go b/vendor/github.com/samber/lo/type_manipulation.go new file mode 100644 index 0000000000..45d8fe2037 --- /dev/null +++ b/vendor/github.com/samber/lo/type_manipulation.go @@ -0,0 +1,102 @@ +package lo + +import "reflect" + +// ToPtr returns a pointer copy of value. +func ToPtr[T any](x T) *T { + return &x +} + +// EmptyableToPtr returns a pointer copy of value if it's nonzero. +// Otherwise, returns nil pointer. +func EmptyableToPtr[T any](x T) *T { + // 🤮 + isZero := reflect.ValueOf(&x).Elem().IsZero() + if isZero { + return nil + } + + return &x +} + +// FromPtr returns the pointer value or empty. +func FromPtr[T any](x *T) T { + if x == nil { + return Empty[T]() + } + + return *x +} + +// FromPtrOr returns the pointer value or the fallback value. +func FromPtrOr[T any](x *T, fallback T) T { + if x == nil { + return fallback + } + + return *x +} + +// ToSlicePtr returns a slice of pointer copy of value. 
+func ToSlicePtr[T any](collection []T) []*T { + return Map(collection, func(x T, _ int) *T { + return &x + }) +} + +// ToAnySlice returns a slice with all elements mapped to `any` type +func ToAnySlice[T any](collection []T) []any { + result := make([]any, len(collection)) + for i, item := range collection { + result[i] = item + } + return result +} + +// FromAnySlice returns an `any` slice with all elements mapped to a type. +// Returns false in case of type conversion failure. +func FromAnySlice[T any](in []any) (out []T, ok bool) { + defer func() { + if r := recover(); r != nil { + out = []T{} + ok = false + } + }() + + result := make([]T, len(in)) + for i, item := range in { + result[i] = item.(T) + } + return result, true +} + +// Empty returns an empty value. +func Empty[T any]() T { + var zero T + return zero +} + +// IsEmpty returns true if argument is a zero value. +func IsEmpty[T comparable](v T) bool { + var zero T + return zero == v +} + +// IsNotEmpty returns true if argument is not a zero value. +func IsNotEmpty[T comparable](v T) bool { + var zero T + return zero != v +} + +// Coalesce returns the first non-empty arguments. Arguments must be comparable. +func Coalesce[T comparable](v ...T) (result T, ok bool) { + for _, e := range v { + if e != result { + result = e + ok = true + return + } + } + + return +} diff --git a/vendor/github.com/samber/lo/types.go b/vendor/github.com/samber/lo/types.go new file mode 100644 index 0000000000..271c5b4fdf --- /dev/null +++ b/vendor/github.com/samber/lo/types.go @@ -0,0 +1,123 @@ +package lo + +// Entry defines a key/value pairs. +type Entry[K comparable, V any] struct { + Key K + Value V +} + +// Tuple2 is a group of 2 elements (pair). +type Tuple2[A any, B any] struct { + A A + B B +} + +// Unpack returns values contained in tuple. +func (t Tuple2[A, B]) Unpack() (A, B) { + return t.A, t.B +} + +// Tuple3 is a group of 3 elements. 
+type Tuple3[A any, B any, C any] struct { + A A + B B + C C +} + +// Unpack returns values contained in tuple. +func (t Tuple3[A, B, C]) Unpack() (A, B, C) { + return t.A, t.B, t.C +} + +// Tuple4 is a group of 4 elements. +type Tuple4[A any, B any, C any, D any] struct { + A A + B B + C C + D D +} + +// Unpack returns values contained in tuple. +func (t Tuple4[A, B, C, D]) Unpack() (A, B, C, D) { + return t.A, t.B, t.C, t.D +} + +// Tuple5 is a group of 5 elements. +type Tuple5[A any, B any, C any, D any, E any] struct { + A A + B B + C C + D D + E E +} + +// Unpack returns values contained in tuple. +func (t Tuple5[A, B, C, D, E]) Unpack() (A, B, C, D, E) { + return t.A, t.B, t.C, t.D, t.E +} + +// Tuple6 is a group of 6 elements. +type Tuple6[A any, B any, C any, D any, E any, F any] struct { + A A + B B + C C + D D + E E + F F +} + +// Unpack returns values contained in tuple. +func (t Tuple6[A, B, C, D, E, F]) Unpack() (A, B, C, D, E, F) { + return t.A, t.B, t.C, t.D, t.E, t.F +} + +// Tuple7 is a group of 7 elements. +type Tuple7[A any, B any, C any, D any, E any, F any, G any] struct { + A A + B B + C C + D D + E E + F F + G G +} + +// Unpack returns values contained in tuple. +func (t Tuple7[A, B, C, D, E, F, G]) Unpack() (A, B, C, D, E, F, G) { + return t.A, t.B, t.C, t.D, t.E, t.F, t.G +} + +// Tuple8 is a group of 8 elements. +type Tuple8[A any, B any, C any, D any, E any, F any, G any, H any] struct { + A A + B B + C C + D D + E E + F F + G G + H H +} + +// Unpack returns values contained in tuple. +func (t Tuple8[A, B, C, D, E, F, G, H]) Unpack() (A, B, C, D, E, F, G, H) { + return t.A, t.B, t.C, t.D, t.E, t.F, t.G, t.H +} + +// Tuple9 is a group of 9 elements. +type Tuple9[A any, B any, C any, D any, E any, F any, G any, H any, I any] struct { + A A + B B + C C + D D + E E + F F + G G + H H + I I +} + +// Unpack returns values contained in tuple. 
+func (t Tuple9[A, B, C, D, E, F, G, H, I]) Unpack() (A, B, C, D, E, F, G, H, I) { + return t.A, t.B, t.C, t.D, t.E, t.F, t.G, t.H, t.I +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 5b4d496363..9d38d44312 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -870,6 +870,9 @@ github.com/robfig/cron/v3 # github.com/russross/blackfriday v1.5.2 ## explicit github.com/russross/blackfriday +# github.com/samber/lo v1.38.1 +## explicit; go 1.18 +github.com/samber/lo # github.com/satori/go.uuid v1.2.0 ## explicit github.com/satori/go.uuid From 15b3ba130081a43ffa2611fb7a623de3fa998680 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 23 Oct 2023 11:15:44 +0530 Subject: [PATCH 046/143] migration script fix --- ...77_custom_image_tag.down.sql => 182_custom_image_tag.down.sql} | 0 .../{177_custom_image_tag.up.sql => 182_custom_image_tag.up.sql} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename scripts/sql/{177_custom_image_tag.down.sql => 182_custom_image_tag.down.sql} (100%) rename scripts/sql/{177_custom_image_tag.up.sql => 182_custom_image_tag.up.sql} (100%) diff --git a/scripts/sql/177_custom_image_tag.down.sql b/scripts/sql/182_custom_image_tag.down.sql similarity index 100% rename from scripts/sql/177_custom_image_tag.down.sql rename to scripts/sql/182_custom_image_tag.down.sql diff --git a/scripts/sql/177_custom_image_tag.up.sql b/scripts/sql/182_custom_image_tag.up.sql similarity index 100% rename from scripts/sql/177_custom_image_tag.up.sql rename to scripts/sql/182_custom_image_tag.up.sql From 2ff5cf8965ca8e7bfc724c583ca44cb73b81b8a0 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 23 Oct 2023 12:22:21 +0530 Subject: [PATCH 047/143] fix get api --- pkg/pipeline/BuildPipelineConfigService.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/pipeline/BuildPipelineConfigService.go 
b/pkg/pipeline/BuildPipelineConfigService.go index b2c968a4d6..950d67a72a 100644 --- a/pkg/pipeline/BuildPipelineConfigService.go +++ b/pkg/pipeline/BuildPipelineConfigService.go @@ -758,6 +758,16 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipelineById(pipelineId int) (ciPi IsDockerConfigOverridden: pipeline.IsDockerConfigOverridden, PipelineType: bean.PipelineType(pipeline.PipelineType), } + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + if err != nil && err != pg.ErrNoRows { + return nil, err + } + if customTag.Id != 0 { + ciPipeline.CustomTagObject = &bean.CustomTagData{ + TagPattern: customTag.TagPattern, + CounterX: customTag.AutoIncreasingNumber, + } + } ciEnvMapping, err := impl.ciPipelineRepository.FindCiEnvMappingByCiPipelineId(pipelineId) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching ci env mapping", "pipelineId", pipelineId, "err", err) From badac184e0a06fe2cb9eefbcba2d38b7adbc0307 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 23 Oct 2023 16:55:43 +0530 Subject: [PATCH 048/143] tag validation updated --- pkg/CustomTagService.go | 42 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 7fe7f2c962..8b4c58f4f6 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -21,6 +21,7 @@ const ( ImageTagUnavailableMessage = "Desired image tag already exists" REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS = `\{.{2,}\}` REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x = `\{[^xX]|{}\}` + REGEX_PATTERN_FOR_IMAGE_TAG = `^[a-zA-Z0-9][a-zA-Z0-9._-]{0,126}[a-zA-Z0-9]$` ) var ( @@ -148,38 +149,33 @@ func validateTagPattern(customTagPattern string) error { return fmt.Errorf("tag length can not be zero") } - if IsInvalidVariableFormat(customTagPattern) { - return fmt.Errorf("only one variable is allowed. 
Allowed variable format : {x} or {X}") + count := 0 + count = count + strings.Count(customTagPattern, ".{x}") + count = count + strings.Count(customTagPattern, ".{X}") + + if count == 0 { + return fmt.Errorf("variable with format {x} or {X} not found") + } else if count > 1 { + return fmt.Errorf("only one variable with format {x} or {X} found") } - remainingString := strings.ReplaceAll(customTagPattern, ".{x}", "") - remainingString = strings.ReplaceAll(remainingString, ".{X}", "") - if len(remainingString) == 0 { + tagWithoutVariable := strings.ReplaceAll(customTagPattern, ".{x}", "") + tagWithoutVariable = strings.ReplaceAll(tagWithoutVariable, ".{X}", "") + if len(tagWithoutVariable) == 0 { return nil } - n := len(remainingString) - if remainingString[0] == '.' || remainingString[0] == '-' { - return fmt.Errorf("tag can not start with an hyphen or a period") - } - if n != 0 && (remainingString[n-1] == '.' || remainingString[n-1] == '-') { - return fmt.Errorf("tag can not end with an hyphen or a period") + if isValidDockerImageTag(tagWithoutVariable) { + return fmt.Errorf("not a valid image tag") } + return nil } -func IsInvalidVariableFormat(customTagPattern string) bool { - regex := regexp.MustCompile(REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS) - matches := regex.FindAllString(customTagPattern, -1) - if len(matches) > 0 { - return true - } - regex = regexp.MustCompile(REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x) - matches = regex.FindAllString(customTagPattern, -1) - if len(matches) > 0 { - return true - } - return false +func isValidDockerImageTag(tag string) bool { + // Define the regular expression for a valid Docker image tag + re := regexp.MustCompile(REGEX_PATTERN_FOR_IMAGE_TAG) + return re.MatchString(tag) } func validateTag(imageTag string) error { From 849ebfed4a5a15f6c8d85e897f67d97e0e19d28f Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 23 Oct 2023 17:03:08 +0530 Subject: [PATCH 049/143] wip: validaion using regex --- 
pkg/CustomTagService.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 8b4c58f4f6..66103e1483 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -137,9 +137,8 @@ func validateAndConstructTag(customTagData *repository.CustomTag) (string, error return "", fmt.Errorf("counter {x} can not be negative") } dockerImageTag := strings.ReplaceAll(customTagData.TagPattern, "{x}", strconv.Itoa(customTagData.AutoIncreasingNumber-1)) //-1 because number is already incremented, current value will be used next time - err = validateTag(dockerImageTag) - if err != nil { - return "", err + if isValidDockerImageTag(dockerImageTag) { + return dockerImageTag, fmt.Errorf("invalid docker tag") } return dockerImageTag, nil } From 87420ff77d0e12830a1a18ee2458512dffea9858 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 23 Oct 2023 18:40:39 +0530 Subject: [PATCH 050/143] wip --- Wire.go | 3 + .../sql/repository/CiArtifactRepository.go | 50 ++++-- .../DeploymentPipelineConfigService.go | 62 +++++++- pkg/pipeline/StageServiceUtil.go | 11 +- pkg/pipeline/WorkflowDagExecutor.go | 122 ++++++++++----- pkg/pipeline/pipelineStageVariableParser.go | 147 ++++++++++++++++++ pkg/plugin/GlobalPluginService.go | 39 +++-- pkg/plugin/bean.go | 2 + scripts/sql/181_ci_artifact_refactor.down.sql | 0 scripts/sql/181_ci_artifact_refactor.up.sql | 3 + scripts/sql/182_skopeo_plugin.down.sql | 0 scripts/sql/182_skopeo_plugin.up.sql | 33 ++++ wire_gen.go | 11 +- 13 files changed, 400 insertions(+), 83 deletions(-) create mode 100644 pkg/pipeline/pipelineStageVariableParser.go create mode 100644 scripts/sql/181_ci_artifact_refactor.down.sql create mode 100644 scripts/sql/181_ci_artifact_refactor.up.sql create mode 100644 scripts/sql/182_skopeo_plugin.down.sql create mode 100644 scripts/sql/182_skopeo_plugin.up.sql diff --git a/Wire.go b/Wire.go index 8c3be0c450..daa6421273 100644 --- a/Wire.go +++ b/Wire.go @@ 
-950,6 +950,9 @@ func InitializeApp() (*App, error) { devtronResource.NewDevtronResourceSearchableKeyServiceImpl, wire.Bind(new(devtronResource.DevtronResourceService), new(*devtronResource.DevtronResourceSearchableKeyServiceImpl)), + + pipeline.NewPluginInputVariableParserImpl, + wire.Bind(new(pipeline.PluginInputVariableParser), new(*pipeline.PluginInputVariableParserImpl)), ) return &App{}, nil } diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 7c82b6248b..6e4057c18e 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -31,24 +31,40 @@ import ( "go.uber.org/zap" ) +type credentialsSource = string +type artifactsSourceType = string + +const ( + GLOBAL_CONTAINER_REGISTRY credentialsSource = "global_container_registry" +) +const ( + CI_RUNNER artifactsSourceType = "ci_runner" + WEBHOOK artifactsSourceType = "ext" + PRE_CD artifactsSourceType = "pre_cd" + POST_CD artifactsSourceType = "post_cd" +) + type CiArtifact struct { - tableName struct{} `sql:"ci_artifact" pg:",discard_unknown_columns"` - Id int `sql:"id,pk"` - PipelineId int `sql:"pipeline_id"` //id of the ci pipeline from which this webhook was triggered - Image string `sql:"image,notnull"` - ImageDigest string `sql:"image_digest,notnull"` - MaterialInfo string `sql:"material_info"` //git material metadata json array string - DataSource string `sql:"data_source,notnull"` - WorkflowId *int `sql:"ci_workflow_id"` - ParentCiArtifact int `sql:"parent_ci_artifact"` - ScanEnabled bool `sql:"scan_enabled,notnull"` - Scanned bool `sql:"scanned,notnull"` - ExternalCiPipelineId int `sql:"external_ci_pipeline_id"` - IsArtifactUploaded bool `sql:"is_artifact_uploaded"` - DeployedTime time.Time `sql:"-"` - Deployed bool `sql:"-"` - Latest bool `sql:"-"` - RunningOnParent bool `sql:"-"` + tableName struct{} `sql:"ci_artifact" pg:",discard_unknown_columns"` + Id int `sql:"id,pk"` + 
PipelineId int `sql:"pipeline_id"` //id of the ci pipeline from which this webhook was triggered + Image string `sql:"image,notnull"` + ImageDigest string `sql:"image_digest,notnull"` + MaterialInfo string `sql:"material_info"` //git material metadata json array string + DataSource string `sql:"data_source,notnull"` + WorkflowId *int `sql:"ci_workflow_id"` + ParentCiArtifact int `sql:"parent_ci_artifact"` + ScanEnabled bool `sql:"scan_enabled,notnull"` + Scanned bool `sql:"scanned,notnull"` + ExternalCiPipelineId int `sql:"external_ci_pipeline_id"` + IsArtifactUploaded bool `sql:"is_artifact_uploaded"` + CredentialsSourceType string `sql:"credentials_source_type"` + CredentialSourceValue string `sql:"credentials_source_value"` + ComponentId int `sql:"component_id"` + DeployedTime time.Time `sql:"-"` + Deployed bool `sql:"-"` + Latest bool `sql:"-"` + RunningOnParent bool `sql:"-"` sql.AuditLog } diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index e07c354c6f..44e6d19def 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -56,6 +56,7 @@ import ( "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/errors" "net/http" + "strconv" "strings" "time" ) @@ -264,6 +265,7 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelineById(pipelineId int) (cdPi return nil, err } } + if dbPipeline.PostStageConfigMapSecretNames != "" { err = json.Unmarshal([]byte(dbPipeline.PostStageConfigMapSecretNames), &postStageConfigmapSecrets) if err != nil { @@ -275,6 +277,31 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelineById(pipelineId int) (cdPi if err != nil { return nil, err } + + var customTag *bean.CustomTagData + var customTagStage repository5.PipelineStageType + customTagPreCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypePreCD, strconv.Itoa(pipelineId)) + if err != nil && err != pg.ErrNoRows { + 
impl.logger.Errorw("error in fetching custom Tag precd") + return nil, err + } + customTagPostCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypePostCD, strconv.Itoa(pipelineId)) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching custom Tag precd") + return nil, err + } + if customTagPreCD != nil && customTagPreCD.Id > 0 { + customTag = &bean.CustomTagData{TagPattern: customTagPreCD.TagPattern, + CounterX: customTagPreCD.AutoIncreasingNumber, + } + customTagStage = repository5.PIPELINE_STAGE_TYPE_PRE_CD + } else if customTagPostCD != nil && customTagPostCD.Id > 0 { + customTag = &bean.CustomTagData{TagPattern: customTagPostCD.TagPattern, + CounterX: customTagPostCD.AutoIncreasingNumber, + } + customTagStage = repository5.PIPELINE_STAGE_TYPE_POST_CD + } + cdPipeline = &bean.CDPipelineConfigObject{ Id: dbPipeline.Id, Name: dbPipeline.Name, @@ -296,6 +323,8 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelineById(pipelineId int) (cdPi DeploymentAppType: dbPipeline.DeploymentAppType, DeploymentAppCreated: dbPipeline.DeploymentAppCreated, IsVirtualEnvironment: dbPipeline.Environment.IsVirtualEnvironment, + CustomTagObject: customTag, + CustomTagStage: &customTagStage, } var preDeployStage *bean3.PipelineStageDto var postDeployStage *bean3.PipelineStageDto @@ -442,7 +471,7 @@ func (impl *CdPipelineConfigServiceImpl) DeleteCustomTag(pipeline *bean.CDPipeli func (impl *CdPipelineConfigServiceImpl) DeleteCustomTagByPipelineStageType(pipelineStageType *repository5.PipelineStageType, pipelineId int) error { err := impl.customTagService.DeleteCustomTagIfExists( - bean2.CustomTag{EntityKey: getEntityTypeByPipelineStageType(pipelineStageType), + bean2.CustomTag{EntityKey: getEntityTypeByPipelineStageType(*pipelineStageType), EntityValue: fmt.Sprintf("%d", pipelineId), }) if err != nil { @@ -467,7 +496,7 @@ func (impl *CdPipelineConfigServiceImpl) SaveOrUpdateCustomTagForCDPipeline(pipe } func (impl 
*CdPipelineConfigServiceImpl) ParseCustomTagPatchRequest(pipelineId int, customTagData *bean.CustomTagData, pipelineStageType *repository5.PipelineStageType) (*bean2.CustomTag, error) { - entityType := getEntityTypeByPipelineStageType(pipelineStageType) + entityType := getEntityTypeByPipelineStageType(*pipelineStageType) if entityType == 0 { return nil, fmt.Errorf("invalid stage for cd pipeline custom tag; pipelineStageType: %s ", string(*pipelineStageType)) } @@ -481,8 +510,8 @@ func (impl *CdPipelineConfigServiceImpl) ParseCustomTagPatchRequest(pipelineId i return customTag, nil } -func getEntityTypeByPipelineStageType(pipelineStageType *repository5.PipelineStageType) (customTagEntityType int) { - switch *pipelineStageType { +func getEntityTypeByPipelineStageType(pipelineStageType repository5.PipelineStageType) (customTagEntityType int) { + switch pipelineStageType { case repository5.PIPELINE_STAGE_TYPE_PRE_CD: customTagEntityType = pkg.EntityTypePreCD case repository5.PIPELINE_STAGE_TYPE_POST_CD: @@ -995,6 +1024,29 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelinesByEnvironment(request res } for _, dbPipeline := range authorizedPipelines { + var customTag *bean.CustomTagData + var customTagStage repository5.PipelineStageType + customTagPreCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypePreCD, strconv.Itoa(dbPipeline.Id)) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching custom Tag precd") + return nil, err + } + customTagPostCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypePostCD, strconv.Itoa(dbPipeline.Id)) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching custom Tag precd") + return nil, err + } + if customTagPreCD != nil && customTagPreCD.Id > 0 { + customTag = &bean.CustomTagData{TagPattern: customTagPreCD.TagPattern, + CounterX: customTagPreCD.AutoIncreasingNumber, + } + customTagStage = 
repository5.PIPELINE_STAGE_TYPE_PRE_CD + } else if customTagPostCD != nil && customTagPostCD.Id > 0 { + customTag = &bean.CustomTagData{TagPattern: customTagPostCD.TagPattern, + CounterX: customTagPostCD.AutoIncreasingNumber, + } + customTagStage = repository5.PIPELINE_STAGE_TYPE_POST_CD + } pipeline := &bean.CDPipelineConfigObject{ Id: dbPipeline.Id, Name: dbPipeline.Name, @@ -1017,6 +1069,8 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelinesByEnvironment(request res IsVirtualEnvironment: dbPipeline.IsVirtualEnvironment, PreDeployStage: dbPipeline.PreDeployStage, PostDeployStage: dbPipeline.PostDeployStage, + CustomTagObject: customTag, + CustomTagStage: &customTagStage, } pipelines = append(pipelines, pipeline) } diff --git a/pkg/pipeline/StageServiceUtil.go b/pkg/pipeline/StageServiceUtil.go index 0a45a6f10f..44bdfa0b91 100644 --- a/pkg/pipeline/StageServiceUtil.go +++ b/pkg/pipeline/StageServiceUtil.go @@ -6,6 +6,7 @@ import ( bean2 "github.com/devtron-labs/devtron/pkg/bean" "github.com/devtron-labs/devtron/pkg/pipeline/bean" repository2 "github.com/devtron-labs/devtron/pkg/pipeline/repository" + "github.com/devtron-labs/devtron/pkg/plugin" "github.com/devtron-labs/devtron/pkg/plugin/repository" "gopkg.in/yaml.v2" "strings" @@ -28,7 +29,7 @@ type Task struct { RunStatus bool `json:"-,omitempty"` // task run was attempted or not } -var globalInputVariableList = []string{DOCKER_IMAGE, DEPLOYMENT_RELEASE_ID, DEPLOYMENT_UNIQUE_ID, DEVTRON_CD_TRIGGER_TIME, DEVTRON_CD_TRIGGERED_BY, CD_PIPELINE_ENV_NAME_KEY, CD_PIPELINE_CLUSTER_NAME_KEY, APP_NAME} +var globalInputVariableList = []string{plugin.DOCKER_IMAGE, plugin.DEPLOYMENT_RELEASE_ID, plugin.DEPLOYMENT_UNIQUE_ID, plugin.DEVTRON_CD_TRIGGER_TIME, plugin.DEVTRON_CD_TRIGGERED_BY, plugin.CD_PIPELINE_ENV_NAME_KEY, plugin.CD_PIPELINE_CLUSTER_NAME_KEY, plugin.APP_NAME} func ConvertStageYamlScriptsToPipelineStageSteps(cdPipeline 
*bean2.CDPipelineConfigObject) (*bean2.CDPipelineConfigObject, error) { if cdPipeline.PreDeployStage == nil && len(cdPipeline.PreStage.Config) > 0 { @@ -202,11 +203,11 @@ func constructGlobalInputVariablesUsedInScript(script string) []*bean.StepVariab VariableStepIndexInPlugin: 0, ReferenceVariableStage: "", } - if inputVariable == DEVTRON_CD_TRIGGER_TIME { - stepVariable.ReferenceVariableName = CD_TRIGGER_TIME + if inputVariable == plugin.DEVTRON_CD_TRIGGER_TIME { + stepVariable.ReferenceVariableName = plugin.CD_TRIGGER_TIME } - if inputVariable == DEVTRON_CD_TRIGGERED_BY { - stepVariable.ReferenceVariableName = CD_TRIGGERED_BY + if inputVariable == plugin.DEVTRON_CD_TRIGGERED_BY { + stepVariable.ReferenceVariableName = plugin.CD_TRIGGERED_BY } inputVariables = append(inputVariables, stepVariable) } diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 7a7af416d1..67e032cc77 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -72,7 +72,7 @@ type WorkflowDagExecutor interface { HandleWebhookExternalCiEvent(artifact *repository.CiArtifact, triggeredBy int32, externalCiId int, auth func(email string, projectObject string, envObject string) bool) (bool, error) HandlePreStageSuccessEvent(cdStageCompleteEvent CdStageCompleteEvent) error HandleDeploymentSuccessEvent(pipelineOverride *chartConfig.PipelineOverride) error - HandlePostStageSuccessEvent(cdWorkflowId int, cdPipelineId int, triggeredBy int32) error + HandlePostStageSuccessEvent(cdWorkflowId int, cdPipelineId int, triggeredBy int32, pluginRegistryImageDetails map[string][]string) error Subscribe() error TriggerPostStage(cdWf *pipelineConfig.CdWorkflow, cdPipeline *pipelineConfig.Pipeline, triggeredBy int32, refCdWorkflowRunnerId int) error TriggerPreStage(ctx context.Context, cdWf *pipelineConfig.CdWorkflow, artifact *repository.CiArtifact, pipeline *pipelineConfig.Pipeline, triggeredBy int32, applyAuth bool, refCdWorkflowRunnerId 
int) error @@ -121,33 +121,21 @@ type WorkflowDagExecutorImpl struct { config *CdConfig globalPluginService plugin.GlobalPluginService variableSnapshotHistoryService variables.VariableSnapshotHistoryService - pluginInputVariableParser plugin.InputVariableParser + pluginInputVariableParser PluginInputVariableParser } const ( - CD_PIPELINE_ENV_NAME_KEY = "CD_PIPELINE_ENV_NAME" - CD_PIPELINE_CLUSTER_NAME_KEY = "CD_PIPELINE_CLUSTER_NAME" GIT_COMMIT_HASH_PREFIX = "GIT_COMMIT_HASH" GIT_SOURCE_TYPE_PREFIX = "GIT_SOURCE_TYPE" GIT_SOURCE_VALUE_PREFIX = "GIT_SOURCE_VALUE" - GIT_METADATA = "GIT_METADATA" GIT_SOURCE_COUNT = "GIT_SOURCE_COUNT" APP_LABEL_KEY_PREFIX = "APP_LABEL_KEY" APP_LABEL_VALUE_PREFIX = "APP_LABEL_VALUE" - APP_LABEL_METADATA = "APP_LABEL_METADATA" APP_LABEL_COUNT = "APP_LABEL_COUNT" CHILD_CD_ENV_NAME_PREFIX = "CHILD_CD_ENV_NAME" CHILD_CD_CLUSTER_NAME_PREFIX = "CHILD_CD_CLUSTER_NAME" - CHILD_CD_METADATA = "CHILD_CD_METADATA" CHILD_CD_COUNT = "CHILD_CD_COUNT" - DOCKER_IMAGE = "DOCKER_IMAGE" - DEPLOYMENT_RELEASE_ID = "DEPLOYMENT_RELEASE_ID" - DEPLOYMENT_UNIQUE_ID = "DEPLOYMENT_UNIQUE_ID" - CD_TRIGGERED_BY = "CD_TRIGGERED_BY" - CD_TRIGGER_TIME = "CD_TRIGGER_TIME" - APP_NAME = "APP_NAME" - DEVTRON_CD_TRIGGERED_BY = "DEVTRON_CD_TRIGGERED_BY" - DEVTRON_CD_TRIGGER_TIME = "DEVTRON_CD_TRIGGER_TIME" + DEVTRON_SYSTEM_USER_ID = 1 ) type CiArtifactDTO struct { @@ -162,15 +150,16 @@ type CiArtifactDTO struct { } type CdStageCompleteEvent struct { - CiProjectDetails []bean3.CiProjectDetails `json:"ciProjectDetails"` - WorkflowId int `json:"workflowId"` - WorkflowRunnerId int `json:"workflowRunnerId"` - CdPipelineId int `json:"cdPipelineId"` - TriggeredBy int32 `json:"triggeredBy"` - StageYaml string `json:"stageYaml"` - ArtifactLocation string `json:"artifactLocation"` - PipelineName string `json:"pipelineName"` - CiArtifactDTO pipelineConfig.CiArtifactDTO `json:"ciArtifactDTO"` + CiProjectDetails []bean3.CiProjectDetails `json:"ciProjectDetails"` + WorkflowId int 
`json:"workflowId"` + WorkflowRunnerId int `json:"workflowRunnerId"` + CdPipelineId int `json:"cdPipelineId"` + TriggeredBy int32 `json:"triggeredBy"` + StageYaml string `json:"stageYaml"` + ArtifactLocation string `json:"artifactLocation"` + PipelineName string `json:"pipelineName"` + CiArtifactDTO pipelineConfig.CiArtifactDTO `json:"ciArtifactDTO"` + PluginRegistryImageDetails map[string][]string } type GitMetadata struct { @@ -216,7 +205,7 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi pipelineStageService PipelineStageService, k8sCommonService k8s.K8sCommonService, variableSnapshotHistoryService variables.VariableSnapshotHistoryService, globalPluginService plugin.GlobalPluginService, - pluginInputVariableParser plugin.InputVariableParser, + pluginInputVariableParser PluginInputVariableParser, ) *WorkflowDagExecutorImpl { wde := &WorkflowDagExecutorImpl{logger: Logger, pipelineRepository: pipelineRepository, @@ -299,7 +288,7 @@ func (impl *WorkflowDagExecutorImpl) Subscribe() error { } } else if wf.WorkflowType == bean.CD_WORKFLOW_TYPE_POST { impl.logger.Debugw("received post stage success event for workflow runner ", "wfId", strconv.Itoa(wf.Id)) - err = impl.HandlePostStageSuccessEvent(wf.CdWorkflowId, cdStageCompleteEvent.CdPipelineId, cdStageCompleteEvent.TriggeredBy) + err = impl.HandlePostStageSuccessEvent(wf.CdWorkflowId, cdStageCompleteEvent.CdPipelineId, cdStageCompleteEvent.TriggeredBy, cdStageCompleteEvent.PluginRegistryImageDetails) if err != nil { impl.logger.Errorw("deployment success event error", "err", err) return @@ -470,10 +459,21 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(cdStageCompleteE if err != nil { return err } + ciArtifact, err := impl.ciArtifactRepository.Get(cdStageCompleteEvent.CiArtifactDTO.Id) + if err != nil { + return err + } + var PreCDArtifacts []*repository.CiArtifact if pipeline.TriggerType == pipelineConfig.TRIGGER_TYPE_AUTOMATIC { - ciArtifact, err := 
impl.ciArtifactRepository.Get(cdStageCompleteEvent.CiArtifactDTO.Id) - if err != nil { - return err + if len(cdStageCompleteEvent.PluginRegistryImageDetails) > 0 { + PreCDArtifacts, err = impl.SavePluginArtifacts(ciArtifact, cdStageCompleteEvent.PluginRegistryImageDetails, pipeline.Id, repository.PRE_CD) + if err != nil { + impl.logger.Errorw("error in saving plugin artifacts", "err", err) + return err + } + if len(PreCDArtifacts) > 0 { + ciArtifact = PreCDArtifacts[0] // deployment will be trigger with artifact copied by plugin + } } cdWorkflow, err := impl.cdWorkflowRepository.FindById(cdStageCompleteEvent.WorkflowId) if err != nil { @@ -493,6 +493,36 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(cdStageCompleteE return nil } +func (impl *WorkflowDagExecutorImpl) SavePluginArtifacts(ciArtifact *repository.CiArtifact, pluginArtifactsDetail map[string][]string, pipelineId int, stage string) ([]*repository.CiArtifact, error) { + var CDArtifacts []*repository.CiArtifact + for registry, artifacts := range pluginArtifactsDetail { + // artifacts are list of images + for _, artifact := range artifacts { + pluginArtifact := &repository.CiArtifact{ + Image: artifact, + ImageDigest: ciArtifact.ImageDigest, + DataSource: stage, + ComponentId: pipelineId, + CredentialsSourceType: repository.GLOBAL_CONTAINER_REGISTRY, + CredentialSourceValue: registry, + AuditLog: sql.AuditLog{ + CreatedOn: time.Now(), + CreatedBy: DEVTRON_SYSTEM_USER_ID, + UpdatedOn: time.Now(), + UpdatedBy: DEVTRON_SYSTEM_USER_ID, + }, + } + CDArtifacts = append(CDArtifacts, pluginArtifact) + } + } + err := impl.ciArtifactRepository.SaveAll(CDArtifacts) + if err != nil { + impl.logger.Errorw("Error in saving artifacts metadata generated by plugin") + return CDArtifacts, err + } + return CDArtifacts, nil +} + func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf *pipelineConfig.CdWorkflow, artifact *repository.CiArtifact, pipeline *pipelineConfig.Pipeline, 
triggeredBy int32, applyAuth bool, refCdWorkflowRunnerId int) error { //setting triggeredAt variable to have consistent data for various audit log places in db for deployment time triggeredAt := time.Now() @@ -587,11 +617,11 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * } cdStageWorkflowRequest.StageType = PRE // handling plugin specific logic - skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(plugin.SKOPEO) + skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) for _, step := range cdStageWorkflowRequest.PreCiSteps { if step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, pkg.EntityTypePreCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image) + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, pkg.EntityTypePreCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err @@ -720,11 +750,11 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor cdStageWorkflowRequest.Env = env cdStageWorkflowRequest.Type = bean3.CD_WORKFLOW_PIPELINE_TYPE // handling plugin specific logic - skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(plugin.SKOPEO) + skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) for _, step := range cdStageWorkflowRequest.PostCiSteps { if step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - 
registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, pkg.EntityTypePostCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image) + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, pkg.EntityTypePostCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err @@ -1038,9 +1068,9 @@ func (impl *WorkflowDagExecutorImpl) buildWFRequest(runner *pipelineConfig.CdWor extraEnvVariables := make(map[string]string) if env != nil { - extraEnvVariables[CD_PIPELINE_ENV_NAME_KEY] = env.Name + extraEnvVariables[plugin.CD_PIPELINE_ENV_NAME_KEY] = env.Name if env.Cluster != nil { - extraEnvVariables[CD_PIPELINE_CLUSTER_NAME_KEY] = env.Cluster.ClusterName + extraEnvVariables[plugin.CD_PIPELINE_CLUSTER_NAME_KEY] = env.Cluster.ClusterName } } ciWf, err := impl.ciWorkflowRepository.FindLastTriggeredWorkflowByArtifactId(artifact.Id) @@ -1093,7 +1123,7 @@ func (impl *WorkflowDagExecutorImpl) buildWFRequest(runner *pipelineConfig.CdWor impl.logger.Errorw("err while marshaling git metdata", "err", err) return nil, err } - extraEnvVariables[GIT_METADATA] = string(gitMetadata) + extraEnvVariables[plugin.GIT_METADATA] = string(gitMetadata) extraEnvVariables[GIT_SOURCE_COUNT] = strconv.Itoa(len(ciWf.GitTriggers)) } @@ -1124,7 +1154,7 @@ func (impl *WorkflowDagExecutorImpl) buildWFRequest(runner *pipelineConfig.CdWor impl.logger.Errorw("err while marshaling childCdEnvVariables", "err", err) return nil, err } - extraEnvVariables[CHILD_CD_METADATA] = string(childCdEnvVariablesMetadata) + extraEnvVariables[plugin.CHILD_CD_METADATA] = string(childCdEnvVariablesMetadata) extraEnvVariables[CHILD_CD_COUNT] = strconv.Itoa(len(childPipelines)) } @@ -1139,6 +1169,7 
@@ func (impl *WorkflowDagExecutorImpl) buildWFRequest(runner *pipelineConfig.CdWor cdStageWorkflowRequest.SecretKey = ciPipeline.CiTemplate.DockerRegistry.AWSSecretAccessKey cdStageWorkflowRequest.DockerRegistryType = string(ciPipeline.CiTemplate.DockerRegistry.RegistryType) cdStageWorkflowRequest.DockerRegistryURL = ciPipeline.CiTemplate.DockerRegistry.RegistryURL + cdStageWorkflowRequest.DockerRegistryId = *ciPipeline.CiTemplate.DockerRegistryId } else if cdPipeline.AppId > 0 { ciTemplate, err := impl.CiTemplateRepository.FindByAppId(cdPipeline.AppId) if err != nil { @@ -1154,6 +1185,7 @@ func (impl *WorkflowDagExecutorImpl) buildWFRequest(runner *pipelineConfig.CdWor cdStageWorkflowRequest.SecretKey = ciTemplate.DockerRegistry.AWSSecretAccessKey cdStageWorkflowRequest.DockerRegistryType = string(ciTemplate.DockerRegistry.RegistryType) cdStageWorkflowRequest.DockerRegistryURL = ciTemplate.DockerRegistry.RegistryURL + cdStageWorkflowRequest.DockerRegistryId = *ciTemplate.DockerRegistryId appLabels, err := impl.appLabelRepository.FindAllByAppId(cdPipeline.AppId) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting labels by appId", "err", err, "appId", cdPipeline.AppId) @@ -1175,7 +1207,7 @@ func (impl *WorkflowDagExecutorImpl) buildWFRequest(runner *pipelineConfig.CdWor impl.logger.Errorw("err while marshaling appLabelEnvVariables", "err", err) return nil, err } - extraEnvVariables[APP_LABEL_METADATA] = string(appLabelEnvVariablesMetadata) + extraEnvVariables[plugin.APP_LABEL_METADATA] = string(appLabelEnvVariablesMetadata) } } @@ -1303,7 +1335,7 @@ func (impl *WorkflowDagExecutorImpl) HandleDeploymentSuccessEvent(pipelineOverri } else { // to trigger next pre/cd, if any // finding children cd by pipeline id - err = impl.HandlePostStageSuccessEvent(cdWorkflow.Id, pipelineOverride.PipelineId, 1) + err = impl.HandlePostStageSuccessEvent(cdWorkflow.Id, pipelineOverride.PipelineId, 1, nil) if err != nil { impl.logger.Errorw("error in triggering 
children cd after successful deployment event", "parentCdPipelineId", pipelineOverride.PipelineId) return err @@ -1312,7 +1344,7 @@ func (impl *WorkflowDagExecutorImpl) HandleDeploymentSuccessEvent(pipelineOverri return nil } -func (impl *WorkflowDagExecutorImpl) HandlePostStageSuccessEvent(cdWorkflowId int, cdPipelineId int, triggeredBy int32) error { +func (impl *WorkflowDagExecutorImpl) HandlePostStageSuccessEvent(cdWorkflowId int, cdPipelineId int, triggeredBy int32, pluginRegistryImageDetails map[string][]string) error { // finding children cd by pipeline id cdPipelinesMapping, err := impl.appWorkflowRepository.FindWFCDMappingByParentCDPipelineId(cdPipelineId) if err != nil { @@ -1324,6 +1356,16 @@ func (impl *WorkflowDagExecutorImpl) HandlePostStageSuccessEvent(cdWorkflowId in impl.logger.Errorw("error in finding artifact by cd workflow id", "err", err, "cdWorkflowId", cdWorkflowId) return err } + if len(pluginRegistryImageDetails) > 0 { + PostCDArtifacts, err := impl.SavePluginArtifacts(ciArtifact, pluginRegistryImageDetails, cdPipelineId, repository.POST_CD) + if err != nil { + impl.logger.Errorw("error in saving plugin artifacts", "err", err) + return err + } + if len(PostCDArtifacts) > 0 { + ciArtifact = PostCDArtifacts[0] + } + } //TODO : confirm about this logic used for applyAuth applyAuth := false if triggeredBy != 1 { diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go new file mode 100644 index 0000000000..7aa0d07401 --- /dev/null +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -0,0 +1,147 @@ +package pipeline + +import ( + "errors" + "fmt" + "github.com/devtron-labs/devtron/pkg" + "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/plugin" + "github.com/go-pg/pg" + "go.uber.org/zap" + "strings" +) + +type SkopeoInputVariable = string +type RefPluginName = string + +const ( + SKOPEO 
RefPluginName = "Skopeo" +) + +const ( + DESTINATION_INFO SkopeoInputVariable = "DESTINATION_INFO" + SOURCE_INFO SkopeoInputVariable = "SOURCE_INFO" +) + +type PluginInputVariableParser interface { + ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, entityKey int, entityValue string, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, error) +} + +type PluginInputVariableParserImpl struct { + logger *zap.SugaredLogger + dockerRegistryConfig DockerRegistryConfig + customTagService pkg.CustomTagService +} + +func NewPluginInputVariableParserImpl( + logger *zap.SugaredLogger, + dockerRegistryConfig DockerRegistryConfig, + customTagService pkg.CustomTagService, +) *PluginInputVariableParserImpl { + return &PluginInputVariableParserImpl{ + logger: logger, + dockerRegistryConfig: dockerRegistryConfig, + customTagService: customTagService, + } +} + +func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, entityKey int, entityValue string, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, error) { + var DestinationInfo, SourceRegistry, SourceImage string + for _, ipVariable := range inputVariables { + if ipVariable.Name == DESTINATION_INFO { + DestinationInfo = ipVariable.Value + } else if ipVariable.Name == SOURCE_INFO { + if len(pluginTriggerImage) == 0 { + if len(ipVariable.Value) == 0 { + impl.logger.Errorw("No image provided in source or during trigger time") + return nil, nil, errors.New("no image provided in source or during trigger time") + } + SourceInfo := ipVariable.Value + SourceInfoSplit := strings.Split(SourceInfo, "|") + SourceImage = SourceInfoSplit[len(SourceInfoSplit)-1] + SourceRegistry = SourceInfoSplit[0] + } else { + SourceImage = pluginTriggerImage + SourceRegistry = buildConfigurationRegistry + } + } + } + 
registryDestinationImageMap, registryCredentialMap, err := impl.getRegistryDetailsAndDestinationImagePathForSkopeo(entityKey, entityValue, SourceImage, SourceRegistry, DestinationInfo) + if err != nil { + impl.logger.Errorw("Error in parsing skopeo input variables") + return nil, nil, err + } + return registryDestinationImageMap, registryCredentialMap, nil +} + +func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImagePathForSkopeo(entityKey int, entityValue, sourceImage string, sourceRegistry string, destinationInfo string) (registryDestinationImageMap map[string][]string, registryCredentialsMap map[string]plugin.RegistryCredentials, err error) { + registryDestinationImageMap = make(map[string][]string) + registryCredentialsMap = make(map[string]plugin.RegistryCredentials) + + var sourceImageTag string + sourceSplit := strings.Split(sourceImage, ":") + sourceImageTag = sourceSplit[len(sourceSplit)-1] + + //saving source registry credentials + registryCredentials, err := impl.dockerRegistryConfig.FetchOneDockerAccount(sourceRegistry) + if err != nil { + impl.logger.Errorw("error in fetching registry details by registry name", "err", err) + return registryDestinationImageMap, registryCredentialsMap, err + } + registryCredentialsMap["SOURCE_REGISTRY_CREDENTIAL"] = plugin.RegistryCredentials{ + RegistryType: string(registryCredentials.RegistryType), + RegistryURL: registryCredentials.RegistryURL, + Username: registryCredentials.Username, + Password: registryCredentials.Password, + AWSRegion: registryCredentials.AWSRegion, + AWSSecretAccessKey: registryCredentials.AWSSecretAccessKey, + AWSAccessKeyId: registryCredentials.AWSAccessKeyId, + } + + registryRepoDetails := strings.Split(destinationInfo, "\n") + for _, detail := range registryRepoDetails { + registryRepoSplit := strings.Split(detail, "|") + registryName := strings.Trim(registryRepoSplit[0], " ") + registryCredentials, err := impl.dockerRegistryConfig.FetchOneDockerAccount(registryName) 
+ if err != nil { + impl.logger.Errorw("error in fetching registry details by registry name", "err", err) + return registryDestinationImageMap, registryCredentialsMap, err + } + var destinationImages []string + repositoryValues := registryRepoSplit[1] + repositoryValuesSplit := strings.Split(repositoryValues, ",") + + for _, repositoryName := range repositoryValuesSplit { + repositoryName = strings.Trim(repositoryName, " ") + customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(entityKey, entityValue) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching custom tag by entity key and entity value ", "entityKey", entityKey, "entityValue", entityValue) + return registryDestinationImageMap, registryCredentialsMap, err + } + var destinationImage string + if customTag != nil && customTag.Id == 0 { + destinationImage = fmt.Sprintf("%s/%s:%s", registryCredentials.RegistryURL, repositoryName, sourceImageTag) + } else { + imagePathReservation, err := impl.customTagService.GenerateImagePath(entityKey, entityValue, registryCredentials.RegistryURL, repositoryName) + if err != nil { + impl.logger.Errorw("error in reserving image path", "err", err) + return registryDestinationImageMap, registryCredentialsMap, err + } + destinationImage = imagePathReservation.ImagePath + } + destinationImages = append(destinationImages, destinationImage) + } + registryDestinationImageMap[registryName] = destinationImages + registryCredentialsMap[registryName] = plugin.RegistryCredentials{ + RegistryType: string(registryCredentials.RegistryType), + RegistryURL: registryCredentials.RegistryURL, + Username: registryCredentials.Username, + Password: registryCredentials.Password, + AWSRegion: registryCredentials.AWSRegion, + AWSSecretAccessKey: registryCredentials.AWSSecretAccessKey, + AWSAccessKeyId: registryCredentials.AWSAccessKeyId, + } + } + //adding source registry details + return registryDestinationImageMap, registryCredentialsMap, nil +} diff --git 
a/pkg/plugin/GlobalPluginService.go b/pkg/plugin/GlobalPluginService.go index 3b727b5569..69a4047834 100644 --- a/pkg/plugin/GlobalPluginService.go +++ b/pkg/plugin/GlobalPluginService.go @@ -1,7 +1,6 @@ package plugin import ( - "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/plugin/repository" "github.com/go-pg/pg" "go.uber.org/zap" @@ -15,6 +14,22 @@ type GlobalVariable struct { Type string `json:"stageType"` } +const ( + DOCKER_IMAGE = "DOCKER_IMAGE" + DEPLOYMENT_RELEASE_ID = "DEPLOYMENT_RELEASE_ID" + DEPLOYMENT_UNIQUE_ID = "DEPLOYMENT_UNIQUE_ID" + CD_TRIGGERED_BY = "CD_TRIGGERED_BY" + CD_TRIGGER_TIME = "CD_TRIGGER_TIME" + APP_NAME = "APP_NAME" + DEVTRON_CD_TRIGGERED_BY = "DEVTRON_CD_TRIGGERED_BY" + DEVTRON_CD_TRIGGER_TIME = "DEVTRON_CD_TRIGGER_TIME" + CD_PIPELINE_ENV_NAME_KEY = "CD_PIPELINE_ENV_NAME" + CD_PIPELINE_CLUSTER_NAME_KEY = "CD_PIPELINE_CLUSTER_NAME" + GIT_METADATA = "GIT_METADATA" + CHILD_CD_METADATA = "CHILD_CD_METADATA" + APP_LABEL_METADATA = "APP_LABEL_METADATA" +) + type GlobalPluginService interface { GetAllGlobalVariables() ([]*GlobalVariable, error) ListAllPlugins(stageType int) ([]*PluginListComponentDto, error) @@ -79,67 +94,67 @@ func (impl *GlobalPluginServiceImpl) GetAllGlobalVariables() ([]*GlobalVariable, Type: "ci", }, { - Name: pipeline.CD_PIPELINE_ENV_NAME_KEY, + Name: CD_PIPELINE_ENV_NAME_KEY, Format: string(repository.PLUGIN_VARIABLE_FORMAT_TYPE_STRING), Description: "The name of the environment for which this deployment pipeline is configured.", Type: "cd", }, { - Name: pipeline.CD_PIPELINE_CLUSTER_NAME_KEY, + Name: CD_PIPELINE_CLUSTER_NAME_KEY, Format: string(repository.PLUGIN_VARIABLE_FORMAT_TYPE_STRING), Description: "The name of the cluster to which the environment belongs for which this deployment pipeline is configured.", Type: "cd", }, { - Name: pipeline.DOCKER_IMAGE, + Name: DOCKER_IMAGE, Format: 
string(repository.PLUGIN_VARIABLE_FORMAT_TYPE_STRING), Description: "Complete image name(repository+registry+tag).", Type: "cd", }, { - Name: pipeline.APP_NAME, + Name: APP_NAME, Format: string(repository.PLUGIN_VARIABLE_FORMAT_TYPE_STRING), Description: "The name of the app this pipeline resides in.", Type: "cd", }, { - Name: pipeline.DEPLOYMENT_RELEASE_ID, + Name: DEPLOYMENT_RELEASE_ID, Format: string(repository.PLUGIN_VARIABLE_FORMAT_TYPE_STRING), Description: "Auto-incremented counter for deployment triggers.", Type: "post-cd", }, { - Name: pipeline.DEPLOYMENT_UNIQUE_ID, + Name: DEPLOYMENT_UNIQUE_ID, Format: string(repository.PLUGIN_VARIABLE_FORMAT_TYPE_STRING), Description: "Auto-incremented counter for deployment triggers. Counter is shared between Pre/Post/Deployment stages.", Type: "cd", }, { - Name: pipeline.CD_TRIGGERED_BY, + Name: CD_TRIGGERED_BY, Format: string(repository.PLUGIN_VARIABLE_FORMAT_TYPE_STRING), Description: "Email-Id/Name of the user who triggered the deployment pipeline.", Type: "post-cd", }, { - Name: pipeline.CD_TRIGGER_TIME, + Name: CD_TRIGGER_TIME, Format: string(repository.PLUGIN_VARIABLE_FORMAT_TYPE_STRING), Description: "Time when the deployment pipeline was triggered.", Type: "post-cd", }, { - Name: pipeline.GIT_METADATA, + Name: GIT_METADATA, Format: string(repository.PLUGIN_VARIABLE_FORMAT_TYPE_STRING), Description: "GIT_METADATA consists of GIT_COMMIT_HASH, GIT_SOURCE_TYPE, GIT_SOURCE_VALUE.", Type: "cd", }, { - Name: pipeline.APP_LABEL_METADATA, + Name: APP_LABEL_METADATA, Format: string(repository.PLUGIN_VARIABLE_FORMAT_TYPE_STRING), Description: "APP_LABEL_METADATA consists of APP_LABEL_KEY, APP_LABEL_VALUE. APP_LABEL_METADATA will only be available if workflow has External CI.", Type: "cd", }, { - Name: pipeline.CHILD_CD_METADATA, + Name: CHILD_CD_METADATA, Format: string(repository.PLUGIN_VARIABLE_FORMAT_TYPE_STRING), Description: "CHILD_CD_METADATA consists of CHILD_CD_ENV_NAME, CHILD_CD_CLUSTER_NAME. 
CHILD_CD_METADATA will only be available if this CD pipeline has a Child CD pipeline.", Type: "cd", diff --git a/pkg/plugin/bean.go b/pkg/plugin/bean.go index b078b69c56..5a45c64fb3 100644 --- a/pkg/plugin/bean.go +++ b/pkg/plugin/bean.go @@ -39,6 +39,8 @@ type PluginVariableDto struct { } type RegistryCredentials struct { + RegistryType string `json:"registryType" validate:"required"` + RegistryURL string `json:"registryURL"` Username string `json:"username"` Password string `json:"password"` AWSAccessKeyId string `json:"awsAccessKeyId,omitempty"` diff --git a/scripts/sql/181_ci_artifact_refactor.down.sql b/scripts/sql/181_ci_artifact_refactor.down.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/sql/181_ci_artifact_refactor.up.sql b/scripts/sql/181_ci_artifact_refactor.up.sql new file mode 100644 index 0000000000..f176377712 --- /dev/null +++ b/scripts/sql/181_ci_artifact_refactor.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE ci_artifact ADD COLUMN credentials_source_type VARCHAR(50); +ALTER TABLE ci_artifact ADD COLUMN credentials_source_value VARCHAR(50); +ALTER TABLE ci_artifact ADD COLUMN component_id integer; diff --git a/scripts/sql/182_skopeo_plugin.down.sql b/scripts/sql/182_skopeo_plugin.down.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/sql/182_skopeo_plugin.up.sql b/scripts/sql/182_skopeo_plugin.up.sql new file mode 100644 index 0000000000..72a5be3a9b --- /dev/null +++ b/scripts/sql/182_skopeo_plugin.up.sql @@ -0,0 +1,33 @@ +INSERT INTO "plugin_metadata" ("id", "name", "description","type","icon","deleted", "created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_metadata'), 'Skopeo','','PRESET','','f', 'now()', 1, 'now()', 1); + +INSERT INTO "plugin_tag" ("id", "name", "deleted", "created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_tag'), 'CI task','f', 'now()',1, 'now()', 1); + +INSERT INTO "plugin_tag_relation" ("id", "tag_id", "plugin_id", 
"created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_tag_relation'),(SELECT id FROM plugin_tag WHERE name='CI task') , (SELECT id FROM plugin_metadata WHERE name='Skopeo'),'now()', 1, 'now()', 1); + +INSERT INTO "plugin_stage_mapping" ("plugin_id","stage_type","created_on", "created_by", "updated_on", "updated_by") +VALUES ((SELECT id FROM plugin_metadata WHERE name='Skopeo'),0,'now()', 1, 'now()', 1); + +INSERT INTO "plugin_pipeline_script" ("id","type","mount_directory_from_host","container_image_path","deleted","created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_pipeline_script'),'CONTAINER_IMAGE','t','','f','now()',1,'now()',1); + +INSERT INTO "plugin_step" ("id", "plugin_id","name","description","index","step_type","script_id","deleted", "created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM plugin_metadata WHERE name='Skopeo'),'Step 1','Step 1 - Copy container images','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1); + +INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value", "variable_type", "value_type", "variable_step_index", "deleted", "created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'SOURCE_INFO','STRING','source image info. 
FORMAT:- registry| ',true,true,'INPUT','NEW',1 ,'f','now()', 1, 'now()', 1); + +INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index", "deleted", "created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'DESTINATION_INFO','STRING','Skopeo account username',true,true,'INPUT','NEW',1 ,'f','now()', 1, 'now()', 1); + +INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'DOCKER_IMAGE','STRING','',false,true,'INPUT','GLOBAL',1 ,'DOCKER_IMAGE','f','now()', 1, 'now()', 1); + +INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'REGISTRY_DESTINATION_IMAGE_MAP','STRING','',false,true,'INPUT','GLOBAL',1 ,'REGISTRY_DESTINATION_IMAGE_MAP','f','now()', 1, 'now()', 1); + +INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", 
"variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'REGISTRY_CREDENTIALS','STRING','',false,true,'INPUT','GLOBAL',1 ,'REGISTRY_CREDENTIALS','f','now()', 1, 'now()', 1); + diff --git a/wire_gen.go b/wire_gen.go index 7638ed8cde..ab75504855 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -445,7 +445,12 @@ func InitializeApp() (*App, error) { pipelineStageRepositoryImpl := repository11.NewPipelineStageRepository(sugaredLogger, db) globalPluginRepositoryImpl := repository12.NewGlobalPluginRepository(sugaredLogger, db) pipelineStageServiceImpl := pipeline.NewPipelineStageService(sugaredLogger, pipelineStageRepositoryImpl, globalPluginRepositoryImpl, pipelineRepositoryImpl, scopedVariableServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl) - workflowDagExecutorImpl := pipeline.NewWorkflowDagExecutorImpl(sugaredLogger, pipelineRepositoryImpl, cdWorkflowRepositoryImpl, pubSubClientServiceImpl, appServiceImpl, workflowServiceImpl, ciArtifactRepositoryImpl, ciPipelineRepositoryImpl, materialRepositoryImpl, pipelineOverrideRepositoryImpl, userServiceImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, enforcerImpl, enforcerUtilImpl, tokenCache, acdAuthConfig, eventSimpleFactoryImpl, eventRESTClientImpl, cvePolicyRepositoryImpl, imageScanResultRepositoryImpl, appWorkflowRepositoryImpl, prePostCdScriptHistoryServiceImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, pipelineStatusTimelineServiceImpl, ciTemplateRepositoryImpl, ciWorkflowRepositoryImpl, appLabelRepositoryImpl, clientImpl, pipelineStageServiceImpl, k8sCommonServiceImpl, variableSnapshotHistoryServiceImpl) + globalPluginServiceImpl := plugin.NewGlobalPluginService(sugaredLogger, globalPluginRepositoryImpl) + 
dockerRegistryConfigImpl := pipeline.NewDockerRegistryConfigImpl(sugaredLogger, helmAppServiceImpl, dockerArtifactStoreRepositoryImpl, dockerRegistryIpsConfigRepositoryImpl, ociRegistryConfigRepositoryImpl) + imageTagRepositoryImpl := repository.NewImageTagRepository(db, sugaredLogger) + customTagServiceImpl := pkg.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) + pluginInputVariableParserImpl := pipeline.NewPluginInputVariableParserImpl(sugaredLogger, dockerRegistryConfigImpl, customTagServiceImpl) + workflowDagExecutorImpl := pipeline.NewWorkflowDagExecutorImpl(sugaredLogger, pipelineRepositoryImpl, cdWorkflowRepositoryImpl, pubSubClientServiceImpl, appServiceImpl, workflowServiceImpl, ciArtifactRepositoryImpl, ciPipelineRepositoryImpl, materialRepositoryImpl, pipelineOverrideRepositoryImpl, userServiceImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, enforcerImpl, enforcerUtilImpl, tokenCache, acdAuthConfig, eventSimpleFactoryImpl, eventRESTClientImpl, cvePolicyRepositoryImpl, imageScanResultRepositoryImpl, appWorkflowRepositoryImpl, prePostCdScriptHistoryServiceImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, pipelineStatusTimelineServiceImpl, ciTemplateRepositoryImpl, ciWorkflowRepositoryImpl, appLabelRepositoryImpl, clientImpl, pipelineStageServiceImpl, k8sCommonServiceImpl, variableSnapshotHistoryServiceImpl, globalPluginServiceImpl, pluginInputVariableParserImpl) deploymentGroupAppRepositoryImpl := repository.NewDeploymentGroupAppRepositoryImpl(sugaredLogger, db) deploymentGroupServiceImpl := deploymentGroup.NewDeploymentGroupServiceImpl(appRepositoryImpl, sugaredLogger, pipelineRepositoryImpl, ciPipelineRepositoryImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, deploymentGroupAppRepositoryImpl, ciArtifactRepositoryImpl, appWorkflowRepositoryImpl, workflowDagExecutorImpl) deploymentConfigServiceImpl := pipeline.NewDeploymentConfigServiceImpl(sugaredLogger, envConfigOverrideRepositoryImpl, 
chartRepositoryImpl, pipelineRepositoryImpl, envLevelAppMetricsRepositoryImpl, appLevelMetricsRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, configMapHistoryServiceImpl, chartRefRepositoryImpl, variableEntityMappingServiceImpl, scopedVariableServiceImpl) @@ -463,8 +468,6 @@ func InitializeApp() (*App, error) { ciBuildConfigServiceImpl := pipeline.NewCiBuildConfigServiceImpl(sugaredLogger, ciBuildConfigRepositoryImpl) ciTemplateServiceImpl := pipeline.NewCiTemplateServiceImpl(sugaredLogger, ciBuildConfigServiceImpl, ciTemplateRepositoryImpl, ciTemplateOverrideRepositoryImpl) configMapServiceImpl := pipeline.NewConfigMapServiceImpl(chartRepositoryImpl, sugaredLogger, chartRepoRepositoryImpl, utilMergeUtil, pipelineConfigRepositoryImpl, configMapRepositoryImpl, envConfigOverrideRepositoryImpl, commonServiceImpl, appRepositoryImpl, configMapHistoryServiceImpl, environmentRepositoryImpl) - imageTagRepositoryImpl := repository.NewImageTagRepository(db, sugaredLogger) - customTagServiceImpl := pkg.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) ciCdPipelineOrchestratorImpl := pipeline.NewCiCdPipelineOrchestrator(appRepositoryImpl, sugaredLogger, materialRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciCdConfig, appWorkflowRepositoryImpl, environmentRepositoryImpl, attributesServiceImpl, appListingRepositoryImpl, appCrudOperationServiceImpl, userAuthServiceImpl, prePostCdScriptHistoryServiceImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, ciTemplateOverrideRepositoryImpl, gitMaterialHistoryServiceImpl, ciPipelineHistoryServiceImpl, ciTemplateServiceImpl, dockerArtifactStoreRepositoryImpl, configMapServiceImpl, customTagServiceImpl, genericNoteServiceImpl) ecrConfig, err := pipeline.GetEcrConfig() if err != nil { @@ -501,7 +504,6 @@ func InitializeApp() (*App, error) { } ciHandlerImpl := pipeline.NewCiHandlerImpl(sugaredLogger, ciServiceImpl, 
ciPipelineMaterialRepositoryImpl, clientImpl, ciWorkflowRepositoryImpl, workflowServiceImpl, ciLogServiceImpl, ciArtifactRepositoryImpl, userServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciPipelineRepositoryImpl, appListingRepositoryImpl, k8sUtil, pipelineRepositoryImpl, enforcerUtilImpl, resourceGroupServiceImpl, environmentRepositoryImpl, imageTaggingServiceImpl, appWorkflowRepositoryImpl, customTagServiceImpl, k8sCommonServiceImpl) gitRegistryConfigImpl := pipeline.NewGitRegistryConfigImpl(sugaredLogger, gitProviderRepositoryImpl, clientImpl) - dockerRegistryConfigImpl := pipeline.NewDockerRegistryConfigImpl(sugaredLogger, helmAppServiceImpl, dockerArtifactStoreRepositoryImpl, dockerRegistryIpsConfigRepositoryImpl, ociRegistryConfigRepositoryImpl) appListingViewBuilderImpl := app2.NewAppListingViewBuilderImpl(sugaredLogger) linkoutsRepositoryImpl := repository.NewLinkoutsRepositoryImpl(sugaredLogger, db) appListingServiceImpl := app2.NewAppListingServiceImpl(sugaredLogger, appListingRepositoryImpl, applicationServiceClientImpl, appRepositoryImpl, appListingViewBuilderImpl, pipelineRepositoryImpl, linkoutsRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, environmentRepositoryImpl, argoUserServiceImpl, envConfigOverrideRepositoryImpl, chartRepositoryImpl, ciPipelineRepositoryImpl, dockerRegistryIpsConfigServiceImpl) @@ -743,7 +745,6 @@ func InitializeApp() (*App, error) { externalLinkServiceImpl := externalLink.NewExternalLinkServiceImpl(sugaredLogger, externalLinkMonitoringToolRepositoryImpl, externalLinkIdentifierMappingRepositoryImpl, externalLinkRepositoryImpl) externalLinkRestHandlerImpl := externalLink2.NewExternalLinkRestHandlerImpl(sugaredLogger, externalLinkServiceImpl, userServiceImpl, enforcerImpl, enforcerUtilImpl) externalLinkRouterImpl := externalLink2.NewExternalLinkRouterImpl(externalLinkRestHandlerImpl) - globalPluginServiceImpl := 
plugin.NewGlobalPluginService(sugaredLogger, globalPluginRepositoryImpl) globalPluginRestHandlerImpl := restHandler.NewGlobalPluginRestHandler(sugaredLogger, globalPluginServiceImpl, enforcerUtilImpl, enforcerImpl, pipelineBuilderImpl) globalPluginRouterImpl := router.NewGlobalPluginRouter(sugaredLogger, globalPluginRestHandlerImpl) moduleRestHandlerImpl := module2.NewModuleRestHandlerImpl(sugaredLogger, moduleServiceImpl, userServiceImpl, enforcerImpl, validate) From 784b65a139b90afda4ffd8d1a00346bddeba95db Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 23 Oct 2023 18:50:40 +0530 Subject: [PATCH 051/143] fix validation --- pkg/CustomTagService.go | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 66103e1483..09ec18f7df 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -148,18 +148,30 @@ func validateTagPattern(customTagPattern string) error { return fmt.Errorf("tag length can not be zero") } - count := 0 - count = count + strings.Count(customTagPattern, ".{x}") - count = count + strings.Count(customTagPattern, ".{X}") - - if count == 0 { - return fmt.Errorf("variable with format {x} or {X} not found") - } else if count > 1 { + // for patterns like v1.0.{x} we will calculate count with . 
in {x} i.e .{x} + variableCount := 0 + variableCount = variableCount + strings.Count(customTagPattern, ".{x}") + variableCount = variableCount + strings.Count(customTagPattern, ".{X}") + + if variableCount == 0 { + // there can be case when there is only one {x} or {x} + IsOnlyVariableTag := 0 + IsOnlyVariableTag = IsOnlyVariableTag + strings.Count(customTagPattern, "{x}") + IsOnlyVariableTag = IsOnlyVariableTag + strings.Count(customTagPattern, "{X}") + + if IsOnlyVariableTag == 0 { + return fmt.Errorf("variable with format {x} or {X} not found") + } else if IsOnlyVariableTag > 1 { + return fmt.Errorf("only one variable with format {x} or {X} found") + } + } else if variableCount > 1 { return fmt.Errorf("only one variable with format {x} or {X} found") } tagWithoutVariable := strings.ReplaceAll(customTagPattern, ".{x}", "") tagWithoutVariable = strings.ReplaceAll(tagWithoutVariable, ".{X}", "") + tagWithoutVariable = strings.ReplaceAll(tagWithoutVariable, "{x}", "") + tagWithoutVariable = strings.ReplaceAll(tagWithoutVariable, "{X}", "") if len(tagWithoutVariable) == 0 { return nil } From fca223a8a7271d41bb29a4aad18b5d9852592c97 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 23 Oct 2023 19:11:41 +0530 Subject: [PATCH 052/143] fixing error string --- pkg/CustomTagService.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 09ec18f7df..6038088292 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -158,14 +158,13 @@ func validateTagPattern(customTagPattern string) error { IsOnlyVariableTag := 0 IsOnlyVariableTag = IsOnlyVariableTag + strings.Count(customTagPattern, "{x}") IsOnlyVariableTag = IsOnlyVariableTag + strings.Count(customTagPattern, "{X}") - if IsOnlyVariableTag == 0 { return fmt.Errorf("variable with format {x} or {X} not found") } else if IsOnlyVariableTag > 1 { - return fmt.Errorf("only one variable with format {x} or {X} found") + return 
fmt.Errorf("only one variable with format {x} or {X} allowed") } } else if variableCount > 1 { - return fmt.Errorf("only one variable with format {x} or {X} found") + return fmt.Errorf("only one variable with format {x} or {X} allowed") } tagWithoutVariable := strings.ReplaceAll(customTagPattern, ".{x}", "") From 8b39dbe18ee4e3f2f1d9a15f4a1899e9dc2ad0c2 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 23 Oct 2023 19:34:21 +0530 Subject: [PATCH 053/143] fix validation --- pkg/CustomTagService.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 6038088292..7a240cf5bd 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -175,7 +175,7 @@ func validateTagPattern(customTagPattern string) error { return nil } - if isValidDockerImageTag(tagWithoutVariable) { + if !isValidDockerImageTag(tagWithoutVariable) { return fmt.Errorf("not a valid image tag") } From 20f35c6c196cc51c3c83a7f31fbf16cde82d4c81 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 23 Oct 2023 21:57:39 +0530 Subject: [PATCH 054/143] tag validation updated --- pkg/CustomTagService.go | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 7a240cf5bd..61688b4796 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -167,15 +167,13 @@ func validateTagPattern(customTagPattern string) error { return fmt.Errorf("only one variable with format {x} or {X} allowed") } - tagWithoutVariable := strings.ReplaceAll(customTagPattern, ".{x}", "") - tagWithoutVariable = strings.ReplaceAll(tagWithoutVariable, ".{X}", "") - tagWithoutVariable = strings.ReplaceAll(tagWithoutVariable, "{x}", "") - tagWithoutVariable = strings.ReplaceAll(tagWithoutVariable, "{X}", "") - if len(tagWithoutVariable) == 0 { - return nil - } + // replacing variable with 1 (dummy value) and checking if resulting string is valid tag + tagWithDummyValue := 
strings.ReplaceAll(customTagPattern, ".{x}", "1") + tagWithDummyValue = strings.ReplaceAll(customTagPattern, ".{X}", "1") + tagWithDummyValue = strings.ReplaceAll(customTagPattern, "{x}", "1") + tagWithDummyValue = strings.ReplaceAll(customTagPattern, "{X}", "1") - if !isValidDockerImageTag(tagWithoutVariable) { + if !isValidDockerImageTag(tagWithDummyValue) { return fmt.Errorf("not a valid image tag") } From a3277dc89fc06792d22ae8f5facc32d8b4dc19bb Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 23 Oct 2023 22:32:14 +0530 Subject: [PATCH 055/143] tag pattern modification --- pkg/CustomTagService.go | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 61688b4796..4cf92aca12 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -150,28 +150,19 @@ func validateTagPattern(customTagPattern string) error { // for patterns like v1.0.{x} we will calculate count with . in {x} i.e .{x} variableCount := 0 - variableCount = variableCount + strings.Count(customTagPattern, ".{x}") - variableCount = variableCount + strings.Count(customTagPattern, ".{X}") + variableCount = variableCount + strings.Count(customTagPattern, "{x}") + variableCount = variableCount + strings.Count(customTagPattern, "{X}") if variableCount == 0 { // there can be case when there is only one {x} or {x} - IsOnlyVariableTag := 0 - IsOnlyVariableTag = IsOnlyVariableTag + strings.Count(customTagPattern, "{x}") - IsOnlyVariableTag = IsOnlyVariableTag + strings.Count(customTagPattern, "{X}") - if IsOnlyVariableTag == 0 { - return fmt.Errorf("variable with format {x} or {X} not found") - } else if IsOnlyVariableTag > 1 { - return fmt.Errorf("only one variable with format {x} or {X} allowed") - } + return fmt.Errorf("variable with format {x} or {X} not found") } else if variableCount > 1 { return fmt.Errorf("only one variable with format {x} or {X} allowed") } // replacing variable with 1 (dummy 
value) and checking if resulting string is valid tag - tagWithDummyValue := strings.ReplaceAll(customTagPattern, ".{x}", "1") - tagWithDummyValue = strings.ReplaceAll(customTagPattern, ".{X}", "1") - tagWithDummyValue = strings.ReplaceAll(customTagPattern, "{x}", "1") - tagWithDummyValue = strings.ReplaceAll(customTagPattern, "{X}", "1") + tagWithDummyValue := strings.ReplaceAll(customTagPattern, "{x}", "1") + tagWithDummyValue = strings.ReplaceAll(tagWithDummyValue, "{X}", "1") if !isValidDockerImageTag(tagWithDummyValue) { return fmt.Errorf("not a valid image tag") From 5f1a6c75d69f594840e7b5fa7b4e6ee970e1448b Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 25 Oct 2023 11:34:25 +0530 Subject: [PATCH 056/143] wip: fix --- pkg/CustomTagService.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 4cf92aca12..20dd3e549c 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -137,7 +137,7 @@ func validateAndConstructTag(customTagData *repository.CustomTag) (string, error return "", fmt.Errorf("counter {x} can not be negative") } dockerImageTag := strings.ReplaceAll(customTagData.TagPattern, "{x}", strconv.Itoa(customTagData.AutoIncreasingNumber-1)) //-1 because number is already incremented, current value will be used next time - if isValidDockerImageTag(dockerImageTag) { + if !isValidDockerImageTag(dockerImageTag) { return dockerImageTag, fmt.Errorf("invalid docker tag") } return dockerImageTag, nil From d89d12effc4b0bbd97844611b62110e763af22dd Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 25 Oct 2023 11:41:26 +0530 Subject: [PATCH 057/143] fixing regex --- pkg/CustomTagService.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/CustomTagService.go b/pkg/CustomTagService.go index 20dd3e549c..a034702553 100644 --- a/pkg/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -21,7 +21,7 @@ const ( ImageTagUnavailableMessage = "Desired image 
tag already exists" REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS = `\{.{2,}\}` REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x = `\{[^xX]|{}\}` - REGEX_PATTERN_FOR_IMAGE_TAG = `^[a-zA-Z0-9][a-zA-Z0-9._-]{0,126}[a-zA-Z0-9]$` + REGEX_PATTERN_FOR_IMAGE_TAG = `^[a-zA-Z0-9]+[a-zA-Z0-9._-]*$` ) var ( From 0f73a9bfb2661e79944c1dafd4e88553508b7ada Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 25 Oct 2023 16:55:08 +0530 Subject: [PATCH 058/143] wip custom tag --- pkg/pipeline/DeploymentPipelineConfigService.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index 44e6d19def..80e37041fe 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -1790,7 +1790,7 @@ func (impl *CdPipelineConfigServiceImpl) updateCdPipeline(ctx context.Context, p } // update custom tag data pipeline.Id = dbPipelineObj.Id // pipeline object is request received from FE - err = impl.SaveOrUpdateCustomTagForCDPipeline(pipeline) + err = impl.CDPipelineCustomTagDBOperations(pipeline) if err != nil { impl.logger.Errorw("error in updating custom tag data for pipeline", "err", err) return err From b713e2d71660bf2358a1f92383990953c04eabb6 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 25 Oct 2023 17:57:04 +0530 Subject: [PATCH 059/143] pr review changes --- Wire.go | 5 +- .../app/BuildPipelineRestHandler.go | 3 +- pkg/pipeline/BuildPipelineConfigService.go | 9 ++-- pkg/pipeline/CiCdPipelineOrchestrator.go | 13 +++-- pkg/pipeline/CiHandler.go | 11 ++--- pkg/pipeline/CiService.go | 47 +++++++++---------- pkg/{ => pipeline}/CustomTagService.go | 39 +++++---------- pkg/pipeline/WebhookService.go | 5 +- pkg/pipeline/bean/CustomTagService.go | 25 ++++++++++ wire_gen.go | 3 +- 10 files changed, 80 insertions(+), 80 deletions(-) rename pkg/{ => pipeline}/CustomTagService.go (82%) create mode 100644 
pkg/pipeline/bean/CustomTagService.go diff --git a/Wire.go b/Wire.go index f7deb21284..7cfd730487 100644 --- a/Wire.go +++ b/Wire.go @@ -80,7 +80,6 @@ import ( security2 "github.com/devtron-labs/devtron/internal/sql/repository/security" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/internal/util/ArgoUtil" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/app/status" "github.com/devtron-labs/devtron/pkg/appClone" @@ -290,8 +289,8 @@ func InitializeApp() (*App, error) { repository.NewImageTagRepository, wire.Bind(new(repository.ImageTagRepository), new(*repository.ImageTagRepositoryImpl)), - pkg.NewCustomTagService, - wire.Bind(new(pkg.CustomTagService), new(*pkg.CustomTagServiceImpl)), + pipeline.NewCustomTagService, + wire.Bind(new(pipeline.CustomTagService), new(*pipeline.CustomTagServiceImpl)), repository.NewGitProviderRepositoryImpl, wire.Bind(new(repository.GitProviderRepository), new(*repository.GitProviderRepositoryImpl)), diff --git a/api/restHandler/app/BuildPipelineRestHandler.go b/api/restHandler/app/BuildPipelineRestHandler.go index 59deb01c5e..ea5a2342a7 100644 --- a/api/restHandler/app/BuildPipelineRestHandler.go +++ b/api/restHandler/app/BuildPipelineRestHandler.go @@ -13,7 +13,6 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/bean" "github.com/devtron-labs/devtron/pkg/pipeline" bean1 "github.com/devtron-labs/devtron/pkg/pipeline/bean" @@ -594,7 +593,7 @@ func (handler 
PipelineConfigRestHandlerImpl) TriggerCiPipeline(w http.ResponseWr //RBAC ENDS response := make(map[string]string) resp, err := handler.ciHandler.HandleCIManual(ciTriggerRequest) - if errors.Is(err, pkg.ErrImagePathInUse) { + if errors.Is(err, bean1.ErrImagePathInUse) { handler.Logger.Errorw("service err duplicate image tag, TriggerCiPipeline", "err", err, "payload", ciTriggerRequest) common.WriteJsonResp(w, err, response, http.StatusConflict) return diff --git a/pkg/pipeline/BuildPipelineConfigService.go b/pkg/pipeline/BuildPipelineConfigService.go index 950d67a72a..4219f677ef 100644 --- a/pkg/pipeline/BuildPipelineConfigService.go +++ b/pkg/pipeline/BuildPipelineConfigService.go @@ -26,7 +26,6 @@ import ( dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/attributes" "github.com/devtron-labs/devtron/pkg/bean" bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" @@ -126,7 +125,7 @@ type CiPipelineConfigServiceImpl struct { ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository resourceGroupService resourceGroup2.ResourceGroupService enforcerUtil rbac.EnforcerUtil - customTagService pkg.CustomTagService + customTagService CustomTagService } func NewCiPipelineConfigServiceImpl(logger *zap.SugaredLogger, @@ -148,7 +147,7 @@ func NewCiPipelineConfigServiceImpl(logger *zap.SugaredLogger, enforcerUtil rbac.EnforcerUtil, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, resourceGroupService resourceGroup2.ResourceGroupService, - customTagService pkg.CustomTagService) *CiPipelineConfigServiceImpl { + customTagService CustomTagService) *CiPipelineConfigServiceImpl { securityConfig := 
&SecurityConfig{} err := env.Parse(securityConfig) @@ -621,7 +620,7 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipeline(appId int) (ciConfig *bea impl.logger.Errorw("error in fetching ciEnvMapping", "ciPipelineId ", pipeline.Id, "err", err) return nil, err } - customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) if err != nil && err != pg.ErrNoRows { return nil, err } @@ -758,7 +757,7 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipelineById(pipelineId int) (ciPi IsDockerConfigOverridden: pipeline.IsDockerConfigOverridden, PipelineType: bean.PipelineType(pipeline.PipelineType), } - customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) if err != nil && err != pg.ErrNoRows { return nil, err } diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index 54ba928cbf..aee7eedeee 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -26,13 +26,12 @@ import ( "encoding/json" "errors" "fmt" - bean4 "github.com/devtron-labs/devtron/api/bean" util3 "github.com/devtron-labs/common-lib/utils/k8s" + bean4 "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/client/gitSensor" app2 "github.com/devtron-labs/devtron/internal/sql/repository/app" dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/helper" - 
"github.com/devtron-labs/devtron/pkg" repository2 "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/genericNotes" repository3 "github.com/devtron-labs/devtron/pkg/genericNotes/repository" @@ -115,7 +114,7 @@ type CiCdPipelineOrchestratorImpl struct { dockerArtifactStoreRepository dockerRegistryRepository.DockerArtifactStoreRepository configMapService ConfigMapService genericNoteService genericNotes.GenericNoteService - customTagService pkg.CustomTagService + customTagService CustomTagService } func NewCiCdPipelineOrchestrator( @@ -141,7 +140,7 @@ func NewCiCdPipelineOrchestrator( ciTemplateService CiTemplateService, dockerArtifactStoreRepository dockerRegistryRepository.DockerArtifactStoreRepository, configMapService ConfigMapService, - customTagService pkg.CustomTagService, + customTagService CustomTagService, genericNoteService genericNotes.GenericNoteService) *CiCdPipelineOrchestratorImpl { return &CiCdPipelineOrchestratorImpl{ appRepository: pipelineGroupRepository, @@ -335,7 +334,7 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. //Otherwise deleteIfExists if createRequest.CustomTagObject != nil { customTag := bean4.CustomTag{ - EntityKey: pkg.EntityTypeCiPipelineId, + EntityKey: bean2.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipelineObject.Id), TagPattern: createRequest.CustomTagObject.TagPattern, AutoIncreasingNumber: createRequest.CustomTagObject.CounterX, @@ -346,7 +345,7 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. 
} } else { customTag := bean4.CustomTag{ - EntityKey: pkg.EntityTypeCiPipelineId, + EntityKey: bean2.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipelineObject.Id), } err := impl.customTagService.DeleteCustomTagIfExists(customTag) @@ -771,7 +770,7 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCiConf(createRequest *bean.CiConf //If customTagObejct has been passed, save it if ciPipeline.CustomTagObject != nil { customTag := &bean4.CustomTag{ - EntityKey: pkg.EntityTypeCiPipelineId, + EntityKey: bean2.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipeline.Id), TagPattern: ciPipeline.CustomTagObject.TagPattern, AutoIncreasingNumber: ciPipeline.CustomTagObject.CounterX, diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index e6a697f34c..385036df79 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -30,7 +30,6 @@ import ( "github.com/devtron-labs/devtron/client/gitSensor" "github.com/devtron-labs/devtron/internal/sql/repository/appWorkflow" repository2 "github.com/devtron-labs/devtron/internal/sql/repository/imageTagging" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/cluster" repository3 "github.com/devtron-labs/devtron/pkg/cluster/repository" k8s2 "github.com/devtron-labs/devtron/pkg/k8s" @@ -107,13 +106,13 @@ type CiHandlerImpl struct { resourceGroupService resourceGroup.ResourceGroupService envRepository repository3.EnvironmentRepository imageTaggingService ImageTaggingService - customTagService pkg.CustomTagService + customTagService CustomTagService appWorkflowRepository appWorkflow.AppWorkflowRepository config *CiConfig k8sCommonService k8s2.K8sCommonService } -func NewCiHandlerImpl(Logger *zap.SugaredLogger, ciService CiService, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, gitSensorClient gitSensor.Client, ciWorkflowRepository 
pipelineConfig.CiWorkflowRepository, workflowService WorkflowService, ciLogService CiLogService, ciArtifactRepository repository.CiArtifactRepository, userService user.UserService, eventClient client.EventClient, eventFactory client.EventFactory, ciPipelineRepository pipelineConfig.CiPipelineRepository, appListingRepository repository.AppListingRepository, K8sUtil *k8s.K8sUtil, cdPipelineRepository pipelineConfig.PipelineRepository, enforcerUtil rbac.EnforcerUtil, resourceGroupService resourceGroup.ResourceGroupService, envRepository repository3.EnvironmentRepository, imageTaggingService ImageTaggingService, appWorkflowRepository appWorkflow.AppWorkflowRepository, customTagService pkg.CustomTagService, k8sCommonService k8s2.K8sCommonService) *CiHandlerImpl { +func NewCiHandlerImpl(Logger *zap.SugaredLogger, ciService CiService, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, gitSensorClient gitSensor.Client, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, workflowService WorkflowService, ciLogService CiLogService, ciArtifactRepository repository.CiArtifactRepository, userService user.UserService, eventClient client.EventClient, eventFactory client.EventFactory, ciPipelineRepository pipelineConfig.CiPipelineRepository, appListingRepository repository.AppListingRepository, K8sUtil *k8s.K8sUtil, cdPipelineRepository pipelineConfig.PipelineRepository, enforcerUtil rbac.EnforcerUtil, resourceGroupService resourceGroup.ResourceGroupService, envRepository repository3.EnvironmentRepository, imageTaggingService ImageTaggingService, appWorkflowRepository appWorkflow.AppWorkflowRepository, customTagService CustomTagService, k8sCommonService k8s2.K8sCommonService) *CiHandlerImpl { cih := &CiHandlerImpl{ Logger: Logger, ciService: ciService, @@ -621,8 +620,8 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int EnvironmentName: w.EnvironmentName, ReferenceWorkflowId: w.RefCiWorkflowId, } - if w.Message == 
pkg.ImageTagUnavailableMessage { - customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(w.CiPipelineId)) + if w.Message == bean3.ImageTagUnavailableMessage { + customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(bean3.EntityTypeCiPipelineId, strconv.Itoa(w.CiPipelineId)) if err != nil && err != pg.ErrNoRows { //err == pg.ErrNoRows should never happen return nil, err @@ -635,7 +634,7 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int wfResponse.CustomTag = &bean2.CustomTagErrorResponse{ TagPattern: customTag.TagPattern, AutoIncreasingNumber: customTag.AutoIncreasingNumber, - Message: pkg.ImageTagUnavailableMessage, + Message: bean3.ImageTagUnavailableMessage, } } if imageTagsDataMap[w.CiArtifactId] != nil { diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index d542f3f45f..c3dacf2cb6 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -24,7 +24,6 @@ import ( appRepository "github.com/devtron-labs/devtron/internal/sql/repository/app" repository3 "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/helper" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" repository1 "github.com/devtron-labs/devtron/pkg/cluster/repository" bean2 "github.com/devtron-labs/devtron/pkg/pipeline/bean" @@ -57,23 +56,23 @@ type CiService interface { } type CiServiceImpl struct { - Logger *zap.SugaredLogger - workflowService WorkflowService - ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository - ciWorkflowRepository pipelineConfig.CiWorkflowRepository - ciConfig *CiConfig - eventClient client.EventClient - eventFactory client.EventFactory - mergeUtil *util.MergeUtil - ciPipelineRepository 
pipelineConfig.CiPipelineRepository - prePostCiScriptHistoryService history.PrePostCiScriptHistoryService - pipelineStageService PipelineStageService - userService user.UserService - ciTemplateService CiTemplateService - appCrudOperationService app.AppCrudOperationService - envRepository repository1.EnvironmentRepository - appRepository appRepository.AppRepository - customTagService pkg.CustomTagService + Logger *zap.SugaredLogger + workflowService WorkflowService + ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository + ciWorkflowRepository pipelineConfig.CiWorkflowRepository + ciConfig *CiConfig + eventClient client.EventClient + eventFactory client.EventFactory + mergeUtil *util.MergeUtil + ciPipelineRepository pipelineConfig.CiPipelineRepository + prePostCiScriptHistoryService history.PrePostCiScriptHistoryService + pipelineStageService PipelineStageService + userService user.UserService + ciTemplateService CiTemplateService + appCrudOperationService app.AppCrudOperationService + envRepository repository1.EnvironmentRepository + appRepository appRepository.AppRepository + customTagService CustomTagService variableSnapshotHistoryService variables.VariableSnapshotHistoryService config *CiConfig } @@ -87,7 +86,7 @@ func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService userService user.UserService, ciTemplateService CiTemplateService, appCrudOperationService app.AppCrudOperationService, envRepository repository1.EnvironmentRepository, appRepository appRepository.AppRepository, variableSnapshotHistoryService variables.VariableSnapshotHistoryService, - customTagService pkg.CustomTagService, + customTagService CustomTagService, ) *CiServiceImpl { cis := &CiServiceImpl{ Logger: Logger, @@ -106,7 +105,7 @@ func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService envRepository: envRepository, appRepository: appRepository, variableSnapshotHistoryService: variableSnapshotHistoryService, - 
customTagService: customTagService, + customTagService: customTagService, } config, err := GetCiConfig() if err != nil { @@ -456,16 +455,16 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. } var dockerImageTag string - customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean2.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) if err != nil && err != pg.ErrNoRows { return nil, err } if customTag.Id != 0 { - imagePathReservation, err := impl.customTagService.GenerateImagePath(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id), pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository) + imagePathReservation, err := impl.customTagService.GenerateImagePath(bean2.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id), pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository) if err != nil { - if errors.Is(err, pkg.ErrImagePathInUse) { + if errors.Is(err, bean2.ErrImagePathInUse) { savedWf.Status = pipelineConfig.WorkflowFailed - savedWf.Message = pkg.ImageTagUnavailableMessage + savedWf.Message = bean2.ImageTagUnavailableMessage err1 := impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) if err1 != nil { impl.Logger.Errorw("could not save workflow, after failing due to conflicting image tag") diff --git a/pkg/CustomTagService.go b/pkg/pipeline/CustomTagService.go similarity index 82% rename from pkg/CustomTagService.go rename to pkg/pipeline/CustomTagService.go index a034702553..76cfc766ac 100644 --- a/pkg/CustomTagService.go +++ b/pkg/pipeline/CustomTagService.go @@ -1,9 +1,10 @@ -package pkg +package pipeline import ( "fmt" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository" + bean2 
"github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/go-pg/pg" "go.uber.org/zap" "regexp" @@ -11,23 +12,6 @@ import ( "strings" ) -const ( - EntityNull = iota - EntityTypeCiPipelineId -) - -const ( - imagePathPattern = "%s/%s:%s" // dockerReg/dockerRepo:Tag - ImageTagUnavailableMessage = "Desired image tag already exists" - REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS = `\{.{2,}\}` - REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x = `\{[^xX]|{}\}` - REGEX_PATTERN_FOR_IMAGE_TAG = `^[a-zA-Z0-9]+[a-zA-Z0-9._-]*$` -) - -var ( - ErrImagePathInUse = fmt.Errorf(ImageTagUnavailableMessage) -) - type CustomTagService interface { CreateOrUpdateCustomTag(tag *bean.CustomTag) error GetCustomTagByEntityKeyAndValue(entityKey int, entityValue string) (*repository.CustomTag, error) @@ -60,7 +44,7 @@ func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag *bean.CustomTag) e customTagData := repository.CustomTag{ EntityKey: tag.EntityKey, EntityValue: tag.EntityValue, - TagPattern: strings.ReplaceAll(tag.TagPattern, "{X}", "{x}"), + TagPattern: strings.ReplaceAll(tag.TagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_X, bean2.IMAGE_TAG_VARIABLE_NAME_x), AutoIncreasingNumber: tag.AutoIncreasingNumber, Metadata: tag.Metadata, Active: true, @@ -105,13 +89,13 @@ func (impl *CustomTagServiceImpl) GenerateImagePath(entityKey int, entityValue s if err != nil { return nil, err } - imagePath := fmt.Sprintf(imagePathPattern, dockerRegistryURL, dockerRepo, tag) + imagePath := fmt.Sprintf(bean2.ImagePathPattern, dockerRegistryURL, dockerRepo, tag) imagePathReservations, err := impl.customTagRepository.FindByImagePath(tx, imagePath) if err != nil && err != pg.ErrNoRows { return nil, err } if len(imagePathReservations) > 0 { - return nil, ErrImagePathInUse + return nil, bean2.ErrImagePathInUse } imagePathReservation := &repository.ImagePathReservation{ ImagePath: imagePath, @@ -136,7 +120,7 @@ func validateAndConstructTag(customTagData 
*repository.CustomTag) (string, error if customTagData.AutoIncreasingNumber < 0 { return "", fmt.Errorf("counter {x} can not be negative") } - dockerImageTag := strings.ReplaceAll(customTagData.TagPattern, "{x}", strconv.Itoa(customTagData.AutoIncreasingNumber-1)) //-1 because number is already incremented, current value will be used next time + dockerImageTag := strings.ReplaceAll(customTagData.TagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_x, strconv.Itoa(customTagData.AutoIncreasingNumber-1)) //-1 because number is already incremented, current value will be used next time if !isValidDockerImageTag(dockerImageTag) { return dockerImageTag, fmt.Errorf("invalid docker tag") } @@ -148,10 +132,9 @@ func validateTagPattern(customTagPattern string) error { return fmt.Errorf("tag length can not be zero") } - // for patterns like v1.0.{x} we will calculate count with . in {x} i.e .{x} variableCount := 0 - variableCount = variableCount + strings.Count(customTagPattern, "{x}") - variableCount = variableCount + strings.Count(customTagPattern, "{X}") + variableCount = variableCount + strings.Count(customTagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_x) + variableCount = variableCount + strings.Count(customTagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_X) if variableCount == 0 { // there can be case when there is only one {x} or {x} @@ -161,8 +144,8 @@ func validateTagPattern(customTagPattern string) error { } // replacing variable with 1 (dummy value) and checking if resulting string is valid tag - tagWithDummyValue := strings.ReplaceAll(customTagPattern, "{x}", "1") - tagWithDummyValue = strings.ReplaceAll(tagWithDummyValue, "{X}", "1") + tagWithDummyValue := strings.ReplaceAll(customTagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_x, "1") + tagWithDummyValue = strings.ReplaceAll(tagWithDummyValue, bean2.IMAGE_TAG_VARIABLE_NAME_X, "1") if !isValidDockerImageTag(tagWithDummyValue) { return fmt.Errorf("not a valid image tag") @@ -173,7 +156,7 @@ func validateTagPattern(customTagPattern string) 
error { func isValidDockerImageTag(tag string) bool { // Define the regular expression for a valid Docker image tag - re := regexp.MustCompile(REGEX_PATTERN_FOR_IMAGE_TAG) + re := regexp.MustCompile(bean2.REGEX_PATTERN_FOR_IMAGE_TAG) return re.MatchString(tag) } diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index adfd216bfc..5e1b6bc18e 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -28,7 +28,6 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" util2 "github.com/devtron-labs/devtron/internal/util" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/sql" "github.com/devtron-labs/devtron/util/event" @@ -71,7 +70,7 @@ type WebhookServiceImpl struct { eventFactory client.EventFactory workflowDagExecutor WorkflowDagExecutor ciHandler CiHandler - customTagService pkg.CustomTagService + customTagService CustomTagService } func NewWebhookServiceImpl( @@ -81,7 +80,7 @@ func NewWebhookServiceImpl( appService app.AppService, eventClient client.EventClient, eventFactory client.EventFactory, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, - customTagService pkg.CustomTagService, + customTagService CustomTagService, workflowDagExecutor WorkflowDagExecutor, ciHandler CiHandler) *WebhookServiceImpl { webhookHandler := &WebhookServiceImpl{ ciArtifactRepository: ciArtifactRepository, diff --git a/pkg/pipeline/bean/CustomTagService.go b/pkg/pipeline/bean/CustomTagService.go new file mode 100644 index 0000000000..b823de3aed --- /dev/null +++ b/pkg/pipeline/bean/CustomTagService.go @@ -0,0 +1,25 @@ +package bean + +import "fmt" + +const ( + EntityNull = iota + EntityTypeCiPipelineId +) + +const ( + ImagePathPattern = "%s/%s:%s" // 
dockerReg/dockerRepo:Tag + ImageTagUnavailableMessage = "Desired image tag already exists" + REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS = `\{.{2,}\}` + REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x = `\{[^xX]|{}\}` + REGEX_PATTERN_FOR_IMAGE_TAG = `^[a-zA-Z0-9]+[a-zA-Z0-9._-]*$` +) + +var ( + ErrImagePathInUse = fmt.Errorf(ImageTagUnavailableMessage) +) + +const ( + IMAGE_TAG_VARIABLE_NAME_X = "{X}" + IMAGE_TAG_VARIABLE_NAME_x = "{x}" +) diff --git a/wire_gen.go b/wire_gen.go index da93db4351..a095cb7752 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -68,7 +68,6 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/security" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/internal/util/ArgoUtil" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/apiToken" app2 "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/app/status" @@ -466,7 +465,7 @@ func InitializeApp() (*App, error) { ciTemplateServiceImpl := pipeline.NewCiTemplateServiceImpl(sugaredLogger, ciBuildConfigServiceImpl, ciTemplateRepositoryImpl, ciTemplateOverrideRepositoryImpl) configMapServiceImpl := pipeline.NewConfigMapServiceImpl(chartRepositoryImpl, sugaredLogger, chartRepoRepositoryImpl, utilMergeUtil, pipelineConfigRepositoryImpl, configMapRepositoryImpl, envConfigOverrideRepositoryImpl, commonServiceImpl, appRepositoryImpl, configMapHistoryServiceImpl, environmentRepositoryImpl) imageTagRepositoryImpl := repository.NewImageTagRepository(db, sugaredLogger) - customTagServiceImpl := pkg.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) + customTagServiceImpl := pipeline.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) ciCdPipelineOrchestratorImpl := pipeline.NewCiCdPipelineOrchestrator(appRepositoryImpl, sugaredLogger, materialRepositoryImpl, 
pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciCdConfig, appWorkflowRepositoryImpl, environmentRepositoryImpl, attributesServiceImpl, appListingRepositoryImpl, appCrudOperationServiceImpl, userAuthServiceImpl, prePostCdScriptHistoryServiceImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, ciTemplateOverrideRepositoryImpl, gitMaterialHistoryServiceImpl, ciPipelineHistoryServiceImpl, ciTemplateServiceImpl, dockerArtifactStoreRepositoryImpl, configMapServiceImpl, customTagServiceImpl, genericNoteServiceImpl) ecrConfig, err := pipeline.GetEcrConfig() if err != nil { From 5d131dba6708bb7672b17cb2a5bf25fcce321cd9 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 25 Oct 2023 18:43:50 +0530 Subject: [PATCH 060/143] Revert "Merge branch 'refactor-tag-generation' into custom-tag-cd" This reverts commit 31120447630ec0be5b16ab819af6a573b9bbc895, reversing changes made to 0f73a9bfb2661e79944c1dafd4e88553508b7ada. --- .github/workflows/pr-issue-validator.yaml | 2 +- CHANGELOG/release-notes-v0.6.23.md | 74 - Wire.go | 17 +- api/bean/ConfigMapAndSecret.go | 8 - api/bean/ValuesOverrideRequest.go | 2 +- api/k8s/capacity/k8sCapacityRestHandler.go | 5 +- api/restHandler/CoreAppRestHandler.go | 96 +- api/restHandler/PipelineHistoryRestHandler.go | 18 +- api/restHandler/PipelineTriggerRestHandler.go | 8 +- .../app/BuildPipelineRestHandler.go | 3 +- .../app/DeploymentPipelineRestHandler.go | 13 +- api/router/pubsub/ApplicationStatusHandler.go | 2 +- charts/devtron/Chart.yaml | 4 +- charts/devtron/devtron-bom.yaml | 56 +- charts/devtron/templates/NOTES.txt | 2 - charts/devtron/templates/gitsensor.yaml | 160 -- charts/devtron/templates/lens.yaml | 115 - charts/devtron/templates/migrator.yaml | 2 +- charts/devtron/templates/workflow.yaml | 843 +------ charts/devtron/values.yaml | 57 +- .../argocdServer/ArgoClientWrapperService.go | 39 - .../argocdServer/{connection => }/Config.go | 2 +- .../{connection => 
}/Connection.go | 2 +- client/argocdServer/{connection => }/Tls.go | 2 +- client/argocdServer/{connection => }/Token.go | 2 +- client/argocdServer/Version.go | 5 +- .../argocdServer/application/Application.go | 6 +- client/argocdServer/bean/bean.go | 3 - client/argocdServer/cluster/Cluster.go | 6 +- client/argocdServer/{connection => }/proxy.go | 2 +- .../{connection => }/proxy_test.go | 2 +- client/argocdServer/repository/Repository.go | 6 +- client/argocdServer/session/Session.go | 4 +- cmd/external-app/wire.go | 7 - cmd/external-app/wire_gen.go | 3 +- docs/SUMMARY.md | 4 +- docs/reference/glossary.md | 6 +- .../creating-application/git-material.md | 149 +- .../deploying-application/triggering-ci.md | 17 - .../integrations/build-and-deploy-ci-cd.md | 4 - docs/user-guide/use-cases/oci-pull.md | 73 - .../pipelineConfig/CiWorkflowRepository.go | 2 +- .../security/ImageScanDeployInfoRepository.go | 9 +- internal/util/MergeUtil.go | 105 +- manifests/install/devtron-installer.yaml | 2 +- manifests/installation-script | 73 +- manifests/release.txt | 2 +- manifests/version.txt | 2 +- manifests/yamls/dashboard.yaml | 2 +- manifests/yamls/devtron.yaml | 11 +- manifests/yamls/gitsensor.yaml | 4 +- manifests/yamls/kubelink.yaml | 2 +- manifests/yamls/kubewatch.yaml | 2 +- manifests/yamls/migrator.yaml | 6 +- manifests/yamls/notifier.yaml | 2 +- manifests/yamls/serviceaccount.yaml | 26 +- pkg/{pipeline => }/CustomTagService.go | 78 +- pkg/app/AppCrudOperationService.go | 9 +- pkg/app/AppService.go | 1915 +++++++++++++++- pkg/appClone/AppCloneService.go | 277 ++- .../AppStoreDeploymentFullModeService.go | 9 - .../service/AppStoreDeploymentService.go | 31 +- .../tool/AppStoreDeploymentHelmService.go | 7 - .../gitops/AppStoreDeploymentArgoCdService.go | 24 +- pkg/auth/UserAuthOidcHelper.go | 6 +- pkg/bean/app.go | 3 - pkg/chart/ChartService.go | 37 +- pkg/cluster/ClusterCronService.go | 2 +- pkg/cluster/ClusterService.go | 14 - pkg/cluster/ClusterServiceExtended.go | 22 +- 
pkg/cluster/repository/ClusterRepository.go | 11 - .../DeployementTemplateService.go | 67 +- pkg/k8s/K8sCommonService.go | 56 +- pkg/k8s/capacity/bean/bean.go | 1 + pkg/k8s/capacity/k8sCapacityService.go | 4 +- pkg/pipeline/BuildPipelineConfigService.go | 17 +- pkg/pipeline/CdHandler.go | 12 +- pkg/pipeline/CiCdConfig.go | 44 - pkg/pipeline/CiCdPipelineOrchestrator.go | 38 +- pkg/pipeline/CiHandler.go | 11 +- pkg/pipeline/CiService.go | 13 +- pkg/pipeline/DeploymentConfigService.go | 58 +- .../DeploymentPipelineConfigService.go | 59 +- pkg/pipeline/PipelineStageService.go | 12 +- pkg/pipeline/WebhookService.go | 5 +- pkg/pipeline/WorkflowDagExecutor.go | 2005 +---------------- pkg/pipeline/WorkflowService.go | 34 +- pkg/pipeline/WorkflowUtils.go | 19 +- pkg/pipeline/bean/CustomTagService.go | 25 - .../DeployedConfigurationHistoryService.go | 19 +- .../DeploymentTemplateHistoryService.go | 79 +- pkg/pipeline/history/bean.go | 20 +- .../repository/PipelineStageRepository.go | 2 +- pkg/resourceQualifiers/bean.go | 3 - pkg/resourceQualifiers/constants.go | 10 +- pkg/user/casbin/rbac.go | 7 +- pkg/util/artifact-utils.go | 12 - pkg/variables/ScopedVariableService.go | 79 +- pkg/variables/ScopedVariableValidator.go | 5 - pkg/variables/models/variable-payload.go | 17 - .../parsers/VariableTemplateParser.go | 70 +- pkg/variables/parsers/bean.go | 24 +- .../repository/ScopedVariableRepository.go | 16 - releasenotes.md | 98 +- .../cronjob-chart_1-2-0/schema.json | 276 +-- .../deployment-chart_1-0-0/schema.json | 216 +- .../deployment-chart_1-0-0/values.yaml | 6 +- .../deployment-chart_1-1-0/schema.json | 234 +- .../deployment-chart_1-1-0/values.yaml | 6 +- .../deployment-chart_4-18-0/schema.json | 252 +-- .../reference-chart_3-10-0/schema.json | 216 +- .../reference-chart_3-11-0/schema.json | 216 +- .../reference-chart_3-12-0/schema.json | 216 +- .../reference-chart_3-13-0/schema.json | 216 +- .../reference-chart_3-9-0/schema.json | 216 +- 
.../reference-chart_4-10-0/schema.json | 216 +- .../reference-chart_4-11-0/schema.json | 216 +- .../reference-chart_4-12-0/schema.json | 216 +- .../reference-chart_4-13-0/schema.json | 216 +- .../reference-chart_4-14-0/schema.json | 216 +- .../reference-chart_4-14-0/values.yaml | 6 +- .../reference-chart_4-15-0/schema.json | 216 +- .../reference-chart_4-15-0/values.yaml | 6 +- .../reference-chart_4-16-0/schema.json | 216 +- .../reference-chart_4-16-0/values.yaml | 6 +- .../reference-chart_4-17-0/schema.json | 234 +- .../reference-chart_4-17-0/values.yaml | 6 +- .../reference-chart_4-18-0/schema.json | 246 +- .../reference-chart_5-0-0/schema.json | 246 +- .../statefulset-chart_4-18-0/schema.json | 246 +- .../statefulset-chart_4-18-0/values.yaml | 6 +- .../statefulset-chart_5-0-0/schema.json | 239 +- .../statefulset-chart_5-0-0/values.yaml | 6 +- .../workflow-chart_1-0-0/schema.json | 216 +- ...down.sql => 177_custom_image_tag.down.sql} | 0 ...tag.up.sql => 177_custom_image_tag.up.sql} | 0 ...move_index_image_scan_deploy_info.down.sql | 2 - ...remove_index_image_scan_deploy_info.up.sql | 2 - util/argo/ArgoUserService.go | 5 +- util/context-utils.go | 23 - util/rbac/EnforcerUtil.go | 93 +- util/rbac/EnforcerUtilHelm.go | 24 +- wire_gen.go | 74 +- 143 files changed, 3610 insertions(+), 9125 deletions(-) delete mode 100644 CHANGELOG/release-notes-v0.6.23.md delete mode 100644 charts/devtron/templates/gitsensor.yaml delete mode 100644 charts/devtron/templates/lens.yaml delete mode 100644 client/argocdServer/ArgoClientWrapperService.go rename client/argocdServer/{connection => }/Config.go (97%) rename client/argocdServer/{connection => }/Connection.go (99%) rename client/argocdServer/{connection => }/Tls.go (99%) rename client/argocdServer/{connection => }/Token.go (98%) delete mode 100644 client/argocdServer/bean/bean.go rename client/argocdServer/{connection => }/proxy.go (99%) rename client/argocdServer/{connection => }/proxy_test.go (98%) delete mode 100644 
docs/user-guide/use-cases/oci-pull.md rename pkg/{pipeline => }/CustomTagService.go (71%) delete mode 100644 pkg/pipeline/bean/CustomTagService.go delete mode 100644 pkg/util/artifact-utils.go rename scripts/sql/{182_custom_image_tag.down.sql => 177_custom_image_tag.down.sql} (100%) rename scripts/sql/{182_custom_image_tag.up.sql => 177_custom_image_tag.up.sql} (100%) delete mode 100644 scripts/sql/181_remove_index_image_scan_deploy_info.down.sql delete mode 100644 scripts/sql/181_remove_index_image_scan_deploy_info.up.sql delete mode 100644 util/context-utils.go diff --git a/.github/workflows/pr-issue-validator.yaml b/.github/workflows/pr-issue-validator.yaml index 55ee469178..0ca7060cc4 100644 --- a/.github/workflows/pr-issue-validator.yaml +++ b/.github/workflows/pr-issue-validator.yaml @@ -39,7 +39,7 @@ jobs: TITLE: ${{ github.event.pull_request.title }} run: | set -x - if [[ "$TITLE" == *"doc:"* || "$TITLE" == *"docs:"* || "$TITLE" == *"chore:"* || "$TITLE" == *"release:"* || "$TITLE" == *"Release:"* ]]; then + if [[ "$TITLE" == *"doc:"* || "$TITLE" == *"docs:"* || "$TITLE" == *"chore:"* ]]; then echo "Skipping validation as this is a PR for documentation or chore." 
gh pr edit $PRNUM --remove-label "PR:Issue-verification-failed" gh pr edit $PRNUM --add-label "PR:Ready-to-Review" diff --git a/CHANGELOG/release-notes-v0.6.23.md b/CHANGELOG/release-notes-v0.6.23.md deleted file mode 100644 index e7cc215a5c..0000000000 --- a/CHANGELOG/release-notes-v0.6.23.md +++ /dev/null @@ -1,74 +0,0 @@ -## v0.6.23 - - - -## Bugs -- fix: DT19-v1 bug fixes (#3962) -- fix: ci pod request correction (#3980) -- fix: pipelineOverride id being sent instead of pipelineId (#3984) -- fix: Iam role handling script for plugin pull image from CR (#3955) -- fix: Deployment Template HCL parsing with % keyword (#4012) -- fix: handled releaseNotExists case for helm type cd pipeline resource tree fetch (#4016) -- fix: auto post cd not working in case of multiple parallel gitOps pipeline (#4018) -- fix: handled error in bulk trigger deploy (#4034) -- fix: The manager(non-admin user) of the application is unable to select a list of apps when assigning permissions (#4053) -- fix: ci job handling in app create api (#4054) -- fix: Deploying currently Active image using TriggerDeploy API from devtctl tool is broken (#4056) -- fix: Unable to delete ci pipeline in case you configure multi git (#4072) -- fix: env for specific deployment (#4085) -- fix: update build configuration fix (#4093) -- fix: Artifacts filter in CD trigger view (#4064) -- fix: Bugathon DT-19 version-2 fixes (#4105) -- fix: App Labels node selector not getting attach in ci-workflow (#4084) -- fix: Update cd pipeline create empty pre post cd steps (#4113) -- fix: normal Refresh after triggering gitops deployment to avoid sync delay in argo (#4066) -- fix: helm chart delete when no rows are found (#4124) -- fix: Unable to abort pre-cd and post-cd workflow (#4121) -- fix: Helm Apps permissions do not allow Terminal or Logs view (#4110) -- fix: port service mapping (#4132) -## Enhancements -- feat: Helm async install (#3856) -- feat: handle CI success event auto trigger in batch (#3951) -- feat: added 
env variable to skip gitops validation on create/update (#3956) -- feat: added flag to configure ecr repo creation (#3963) -- feat: Ability to change branch for all selected applications during bulk build from Application Groups (#3955) -- feat: Variables support in pre-post CI, CD and Jobs (#3911) -- feat: Poll Images from ECR Container Repository Plugin (#3971) -- feat: resource groups CRUD and environment filtering (#3974) -- feat: Scoped variables primitive handling (#4033) -- feat: adding DEVTRON_APP_NAME system variable for deployment template (#4041) -- feat: wf pod restart (#3892) -- feat: added deduction for system variables (#4075) -- feat: manifest comparision (#3844) -- feat: multiple images handling for single workflow for ECR Plugin Poll Images (#4027) -- feat: Jenkins plugin migration (#4039) -- feat: clone cd pipelines while cloning app across project (#4087) -## Documentation -- doc: Glossary of jargonish terms for layman in the context of Devtron (#3820) -- docs: Ephemeral Container Doc (#3912) -- docs: New Image Alignment in Ephemeral doc (#3959) -- docs: Snapshot updation in PVC docs + PreBuild CI-CD (#3964) -- doc: Fixed issuer url in okta docs (#4062) -- docs: Config Approval Draft (#3981) -- docs: Modified Existing Container Registry Doc (#4048) -- docs: Added OCI Pull in Usecases (#4112) -## Others -- chore: added workflow to escalate pager-duty issue (#3927) -- chore: changed loop from for to while (#3928) -- chore: scheduled escalate pager duty issue workflow (#3933) -- chore: added log config for dev mode (#3953) -- chore: minor correction in devtron reference charts (#3957) -- chore: workflow refactoring (#3714) -- chore: pr-issue-validator permissions fix (#3967) -- chore: added CODEOWNERS (#3966) -- chore: Scoped variable refactoring (#3977) -- chore: modified labels of keda autoscale object in deployment chart (#3999) -- chore: Update pr-issue-validator.yaml (#3854) -- chore: refactoring around PipelineBuilder (#4043) -- chore: moved 
k8s library to common-lib and added scripts for adding sshTunnel config to clusters (#3848) -- chore: Add pager-duty issue template (#3988) -- chore: first cut refactor ci-pipeline (#4091) -- chore: refactored appartifact manager and cimaterialconfigservice (#4096) -- chore: Remove the EnvVariablesFromFieldPath from values.yaml in refcharts (#4111) -- chore: Updated schema for Scope Variable (#4079) -- chore: skip validation for release PRs (#4128) diff --git a/Wire.go b/Wire.go index 248c021b56..daa6421273 100644 --- a/Wire.go +++ b/Wire.go @@ -55,7 +55,6 @@ import ( "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/client/argocdServer/application" cluster2 "github.com/devtron-labs/devtron/client/argocdServer/cluster" - "github.com/devtron-labs/devtron/client/argocdServer/connection" repository2 "github.com/devtron-labs/devtron/client/argocdServer/repository" session2 "github.com/devtron-labs/devtron/client/argocdServer/session" "github.com/devtron-labs/devtron/client/cron" @@ -80,6 +79,7 @@ import ( security2 "github.com/devtron-labs/devtron/internal/sql/repository/security" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/internal/util/ArgoUtil" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/app/status" "github.com/devtron-labs/devtron/pkg/appClone" @@ -172,10 +172,10 @@ func InitializeApp() (*App, error) { wire.Value(appStoreBean.RefChartProxyDir("scripts/devtron-reference-helm-charts")), wire.Value(chart.DefaultChart("reference-app-rolling")), wire.Value(util.ChartWorkingDir("/tmp/charts/")), - connection.SettingsManager, + argocdServer.SettingsManager, //auth.GetConfig, - 
connection.GetConfig, + argocdServer.GetConfig, wire.Bind(new(session2.ServiceClient), new(*middleware.LoginService)), sse.NewSSE, @@ -289,8 +289,8 @@ func InitializeApp() (*App, error) { repository.NewImageTagRepository, wire.Bind(new(repository.ImageTagRepository), new(*repository.ImageTagRepositoryImpl)), - pipeline.NewCustomTagService, - wire.Bind(new(pipeline.CustomTagService), new(*pipeline.CustomTagServiceImpl)), + pkg.NewCustomTagService, + wire.Bind(new(pkg.CustomTagService), new(*pkg.CustomTagServiceImpl)), repository.NewGitProviderRepositoryImpl, wire.Bind(new(repository.GitProviderRepository), new(*repository.GitProviderRepositoryImpl)), @@ -821,8 +821,8 @@ func InitializeApp() (*App, error) { wire.Bind(new(pipeline.PipelineStageService), new(*pipeline.PipelineStageServiceImpl)), //plugin ends - connection.NewArgoCDConnectionManagerImpl, - wire.Bind(new(connection.ArgoCDConnectionManager), new(*connection.ArgoCDConnectionManagerImpl)), + argocdServer.NewArgoCDConnectionManagerImpl, + wire.Bind(new(argocdServer.ArgoCDConnectionManager), new(*argocdServer.ArgoCDConnectionManagerImpl)), argo.NewArgoUserServiceImpl, wire.Bind(new(argo.ArgoUserService), new(*argo.ArgoUserServiceImpl)), util2.GetDevtronSecretName, @@ -951,9 +951,6 @@ func InitializeApp() (*App, error) { devtronResource.NewDevtronResourceSearchableKeyServiceImpl, wire.Bind(new(devtronResource.DevtronResourceService), new(*devtronResource.DevtronResourceSearchableKeyServiceImpl)), - argocdServer.NewArgoClientWrapperServiceImpl, - wire.Bind(new(argocdServer.ArgoClientWrapperService), new(*argocdServer.ArgoClientWrapperServiceImpl)), - pipeline.NewPluginInputVariableParserImpl, wire.Bind(new(pipeline.PluginInputVariableParser), new(*pipeline.PluginInputVariableParserImpl)), ) diff --git a/api/bean/ConfigMapAndSecret.go b/api/bean/ConfigMapAndSecret.go index b600ecd5ef..d35d58b824 100644 --- a/api/bean/ConfigMapAndSecret.go +++ b/api/bean/ConfigMapAndSecret.go @@ -19,7 +19,6 @@ package bean import 
( "encoding/json" - "github.com/devtron-labs/devtron/util" ) type ConfigMapRootJson struct { @@ -62,10 +61,3 @@ func (configSecret ConfigSecretMap) GetDataMap() (map[string]string, error) { err := json.Unmarshal(configSecret.Data, &datamap) return datamap, err } -func (configSecretJson ConfigSecretJson) GetDereferencedSecrets() []ConfigSecretMap { - return util.GetDeReferencedArray(configSecretJson.Secrets) -} - -func (configSecretJson *ConfigSecretJson) SetReferencedSecrets(secrets []ConfigSecretMap) { - configSecretJson.Secrets = util.GetReferencedArray(secrets) -} diff --git a/api/bean/ValuesOverrideRequest.go b/api/bean/ValuesOverrideRequest.go index 7bd30e8082..5dc7b24682 100644 --- a/api/bean/ValuesOverrideRequest.go +++ b/api/bean/ValuesOverrideRequest.go @@ -72,7 +72,7 @@ type ValuesOverrideRequest struct { AppName string `json:"-"` PipelineName string `json:"-"` DeploymentAppType string `json:"-"` - Image string `json:"-"` + ImageTag string `json:"-"` } type BulkCdDeployEvent struct { diff --git a/api/k8s/capacity/k8sCapacityRestHandler.go b/api/k8s/capacity/k8sCapacityRestHandler.go index 56d0714a1b..8197408057 100644 --- a/api/k8s/capacity/k8sCapacityRestHandler.go +++ b/api/k8s/capacity/k8sCapacityRestHandler.go @@ -61,7 +61,7 @@ func (handler *K8sCapacityRestHandlerImpl) GetClusterListRaw(w http.ResponseWrit return } token := r.Header.Get("token") - clusters, err := handler.clusterService.FindAllExceptVirtual() + clusters, err := handler.clusterService.FindAll() if err != nil { handler.logger.Errorw("error in getting all clusters", "err", err) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) @@ -83,6 +83,7 @@ func (handler *K8sCapacityRestHandlerImpl) GetClusterListRaw(w http.ResponseWrit Id: cluster.Id, Name: cluster.ClusterName, ErrorInConnection: cluster.ErrorInConnecting, + IsVirtualCluster: cluster.IsVirtualCluster, } clusterDetailList = append(clusterDetailList, clusterDetail) } @@ -101,7 +102,7 @@ func (handler 
*K8sCapacityRestHandlerImpl) GetClusterListWithDetail(w http.Respo return } token := r.Header.Get("token") - clusters, err := handler.clusterService.FindAllExceptVirtual() + clusters, err := handler.clusterService.FindAll() if err != nil { handler.logger.Errorw("error in getting all clusters", "err", err) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) diff --git a/api/restHandler/CoreAppRestHandler.go b/api/restHandler/CoreAppRestHandler.go index 5427d8b0d7..dce1a87c23 100644 --- a/api/restHandler/CoreAppRestHandler.go +++ b/api/restHandler/CoreAppRestHandler.go @@ -1192,50 +1192,45 @@ func (handler CoreAppRestHandlerImpl) deleteApp(ctx context.Context, appId int, // delete all CD pipelines for app starts cdPipelines, err := handler.pipelineBuilder.GetCdPipelinesForApp(appId) - if err != nil && err != pg.ErrNoRows { + if err != nil { handler.logger.Errorw("service err, GetCdPipelines in DeleteApp", "err", err, "appId", appId) return err } - if err != pg.ErrNoRows { - for _, cdPipeline := range cdPipelines.Pipelines { - cdPipelineDeleteRequest := &bean.CDPatchRequest{ - AppId: appId, - UserId: userId, - Action: bean.CD_DELETE, - ForceDelete: true, - NonCascadeDelete: false, - Pipeline: cdPipeline, - } - _, err = handler.pipelineBuilder.PatchCdPipelines(cdPipelineDeleteRequest, ctx) - if err != nil { - handler.logger.Errorw("err in deleting cd pipeline in DeleteApp", "err", err, "payload", cdPipelineDeleteRequest) - return err - } - } + for _, cdPipeline := range cdPipelines.Pipelines { + cdPipelineDeleteRequest := &bean.CDPatchRequest{ + AppId: appId, + UserId: userId, + Action: bean.CD_DELETE, + ForceDelete: true, + NonCascadeDelete: false, + Pipeline: cdPipeline, + } + _, err = handler.pipelineBuilder.PatchCdPipelines(cdPipelineDeleteRequest, ctx) + if err != nil { + handler.logger.Errorw("err in deleting cd pipeline in DeleteApp", "err", err, "payload", cdPipelineDeleteRequest) + return err + } } // delete all CD pipelines for app ends // 
delete all CI pipelines for app starts ciPipelines, err := handler.pipelineBuilder.GetCiPipeline(appId) - if err != nil && err != pg.ErrNoRows { + if err != nil { handler.logger.Errorw("service err, GetCiPipelines in DeleteApp", "err", err, "appId", appId) return err } - if err != pg.ErrNoRows { - - for _, ciPipeline := range ciPipelines.CiPipelines { - ciPipelineDeleteRequest := &bean.CiPatchRequest{ - AppId: appId, - UserId: userId, - Action: bean.DELETE, - CiPipeline: ciPipeline, - } - _, err := handler.pipelineBuilder.PatchCiPipeline(ciPipelineDeleteRequest) - if err != nil { - handler.logger.Errorw("err in deleting ci pipeline in DeleteApp", "err", err, "payload", ciPipelineDeleteRequest) - return err - } + for _, ciPipeline := range ciPipelines.CiPipelines { + ciPipelineDeleteRequest := &bean.CiPatchRequest{ + AppId: appId, + UserId: userId, + Action: bean.DELETE, + CiPipeline: ciPipeline, + } + _, err := handler.pipelineBuilder.PatchCiPipeline(ciPipelineDeleteRequest) + if err != nil { + handler.logger.Errorw("err in deleting ci pipeline in DeleteApp", "err", err, "payload", ciPipelineDeleteRequest) + return err } } // delete all CI pipelines for app ends @@ -1556,37 +1551,16 @@ func (handler CoreAppRestHandlerImpl) createWorkflows(ctx context.Context, appId //Creating workflow ends //Creating CI pipeline starts - ciPipeline, err := handler.createCiPipeline(appId, userId, workflowId, workflow.CiPipeline) + ciPipelineId, err := handler.createCiPipeline(appId, userId, workflowId, workflow.CiPipeline) if err != nil { - err1 := handler.appWorkflowService.DeleteAppWorkflow(workflowId, userId) - if err1 != nil { - handler.logger.Errorw("service err, DeleteAppWorkflow ") - return err1, http.StatusInternalServerError - } handler.logger.Errorw("err in saving ci pipelines", err, "appId", appId) return err, http.StatusInternalServerError } //Creating CI pipeline ends //Creating CD pipeline starts - err = handler.createCdPipelines(ctx, appId, userId, workflowId, 
ciPipeline.Id, workflow.CdPipelines) + err = handler.createCdPipelines(ctx, appId, userId, workflowId, ciPipelineId, workflow.CdPipelines) if err != nil { - ciPipelineDeleteRequest := &bean.CiPatchRequest{ - AppId: appId, - UserId: userId, - Action: bean.DELETE, - CiPipeline: ciPipeline, - } - _, err1 := handler.pipelineBuilder.PatchCiPipeline(ciPipelineDeleteRequest) - if err1 != nil { - handler.logger.Errorw("err in deleting ci pipeline in DeleteApp", "err", err, "payload", ciPipelineDeleteRequest) - return err1, http.StatusInternalServerError - } - err1 = handler.appWorkflowService.DeleteAppWorkflow(workflowId, userId) - if err1 != nil { - handler.logger.Errorw("service err, DeleteAppWorkflow ") - return err1, http.StatusInternalServerError - } handler.logger.Errorw("err in saving cd pipelines", err, "appId", appId) return err, http.StatusInternalServerError } @@ -1616,13 +1590,13 @@ func (handler CoreAppRestHandlerImpl) createWorkflowInDb(workflowName string, ap return savedAppWf.Id, nil } -func (handler CoreAppRestHandlerImpl) createCiPipeline(appId int, userId int32, workflowId int, ciPipelineData *appBean.CiPipelineDetails) (*bean.CiPipeline, error) { +func (handler CoreAppRestHandlerImpl) createCiPipeline(appId int, userId int32, workflowId int, ciPipelineData *appBean.CiPipelineDetails) (int, error) { // if ci pipeline is of external type, then throw error as we are not supporting it as of now if ciPipelineData.ParentCiPipeline == 0 && ciPipelineData.ParentAppId == 0 && ciPipelineData.IsExternal { err := errors.New("external ci pipeline creation is not supported yet") handler.logger.Error("external ci pipeline creation is not supported yet") - return nil, err + return 0, err } // build ci pipeline materials starts @@ -1639,13 +1613,13 @@ func (handler CoreAppRestHandlerImpl) createCiPipeline(appId int, userId int32, } if err != nil { handler.logger.Errorw("service err, FindByAppIdAndCheckoutPath in CreateWorkflows", "err", err, "appId", appId) - return 
nil, err + return 0, err } if gitMaterial == nil { err = errors.New("gitMaterial is nil") handler.logger.Errorw("gitMaterial is nil", "checkoutPath", ciMaterial.CheckoutPath) - return nil, err + return 0, err } ciMaterialRequest := &bean.CiMaterial{ @@ -1690,10 +1664,10 @@ func (handler CoreAppRestHandlerImpl) createCiPipeline(appId int, userId int32, res, err := handler.pipelineBuilder.PatchCiPipeline(ciPipelineRequest) if err != nil { handler.logger.Errorw("service err, PatchCiPipelines", "err", err, "appId", appId) - return nil, err + return 0, err } - return res.CiPipelines[0], nil + return res.CiPipelines[0].Id, nil } func (handler CoreAppRestHandlerImpl) createCdPipelines(ctx context.Context, appId int, userId int32, workflowId int, ciPipelineId int, cdPipelines []*appBean.CdPipelineDetails) error { diff --git a/api/restHandler/PipelineHistoryRestHandler.go b/api/restHandler/PipelineHistoryRestHandler.go index a0e3edcd37..376e88067e 100644 --- a/api/restHandler/PipelineHistoryRestHandler.go +++ b/api/restHandler/PipelineHistoryRestHandler.go @@ -6,7 +6,6 @@ import ( history2 "github.com/devtron-labs/devtron/pkg/pipeline/history" "github.com/devtron-labs/devtron/pkg/user" "github.com/devtron-labs/devtron/pkg/user/casbin" - "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/rbac" "github.com/gorilla/mux" "go.uber.org/zap" @@ -210,11 +209,7 @@ func (handler *PipelineHistoryRestHandlerImpl) FetchDeployedHistoryComponentDeta //RBAC END //checking if user has admin access userHasAdminAccess := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionUpdate, resourceName) - isSuperAdmin, _ := handler.userAuthService.IsSuperAdmin(int(userId)) - - ctx := r.Context() - ctx = util.SetSuperAdminInContext(ctx, isSuperAdmin) - res, err := handler.deployedConfigurationHistoryService.GetDeployedHistoryComponentDetail(ctx, 
pipelineId, id, historyComponent, historyComponentName, userHasAdminAccess) + res, err := handler.deployedConfigurationHistoryService.GetDeployedHistoryComponentDetail(pipelineId, id, historyComponent, historyComponentName, userHasAdminAccess) if err != nil { handler.logger.Errorw("service err, GetDeployedHistoryComponentDetail", "err", err, "pipelineId", pipelineId, "id", id) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) @@ -254,10 +249,7 @@ func (handler *PipelineHistoryRestHandlerImpl) GetAllDeployedConfigurationHistor //RBAC END //checking if user has admin access userHasAdminAccess := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionUpdate, resourceName) - isSuperAdmin, _ := handler.userAuthService.IsSuperAdmin(int(userId)) - ctx := r.Context() - ctx = util.SetSuperAdminInContext(ctx, isSuperAdmin) - res, err := handler.deployedConfigurationHistoryService.GetAllDeployedConfigurationByPipelineIdAndLatestWfrId(ctx, pipelineId, userHasAdminAccess) + res, err := handler.deployedConfigurationHistoryService.GetAllDeployedConfigurationByPipelineIdAndLatestWfrId(pipelineId, userHasAdminAccess) if err != nil { handler.logger.Errorw("service err, GetAllDeployedConfigurationByPipelineIdAndLatestWfrId", "err", err, "pipelineId", pipelineId) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) @@ -302,13 +294,9 @@ func (handler *PipelineHistoryRestHandlerImpl) GetAllDeployedConfigurationHistor return } //RBAC END - - isSuperAdmin, _ := handler.userAuthService.IsSuperAdmin(int(userId)) - ctx := r.Context() - ctx = util.SetSuperAdminInContext(ctx, isSuperAdmin) //checking if user has admin access userHasAdminAccess := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionUpdate, resourceName) - res, err := handler.deployedConfigurationHistoryService.GetAllDeployedConfigurationByPipelineIdAndWfrId(ctx, pipelineId, wfrId, userHasAdminAccess) + res, err := 
handler.deployedConfigurationHistoryService.GetAllDeployedConfigurationByPipelineIdAndWfrId(pipelineId, wfrId, userHasAdminAccess) if err != nil { handler.logger.Errorw("service err, GetAllDeployedConfigurationByPipelineIdAndWfrId", "err", err, "pipelineId", pipelineId, "wfrId", wfrId) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) diff --git a/api/restHandler/PipelineTriggerRestHandler.go b/api/restHandler/PipelineTriggerRestHandler.go index bd6235c478..f5344bff68 100644 --- a/api/restHandler/PipelineTriggerRestHandler.go +++ b/api/restHandler/PipelineTriggerRestHandler.go @@ -21,8 +21,6 @@ import ( "context" "encoding/json" "fmt" - "github.com/devtron-labs/devtron/pkg/app" - "github.com/devtron-labs/devtron/util" "github.com/gorilla/mux" "go.opentelemetry.io/otel" @@ -31,6 +29,7 @@ import ( "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/api/restHandler/common" + "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/deploymentGroup" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/team" @@ -353,12 +352,9 @@ func (handler PipelineTriggerRestHandlerImpl) GetAllLatestDeploymentConfiguratio return } //RBAC END - isSuperAdmin, _ := handler.userAuthService.IsSuperAdmin(int(userId)) - ctx := r.Context() - ctx = util.SetSuperAdminInContext(ctx, isSuperAdmin) //checking if user has admin access userHasAdminAccess := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionUpdate, resourceName) - allDeploymentconfig, err := handler.deploymentConfigService.GetLatestDeploymentConfigurationByPipelineId(ctx, pipelineId, userHasAdminAccess) + allDeploymentconfig, err := handler.deploymentConfigService.GetLatestDeploymentConfigurationByPipelineId(pipelineId, userHasAdminAccess) if err != nil { 
handler.logger.Errorw("error in getting latest deployment config, GetAllDeployedConfigurationHistoryForSpecificWfrIdForPipeline", "err", err, "pipelineId", pipelineId) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) diff --git a/api/restHandler/app/BuildPipelineRestHandler.go b/api/restHandler/app/BuildPipelineRestHandler.go index ea5a2342a7..59deb01c5e 100644 --- a/api/restHandler/app/BuildPipelineRestHandler.go +++ b/api/restHandler/app/BuildPipelineRestHandler.go @@ -13,6 +13,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/bean" "github.com/devtron-labs/devtron/pkg/pipeline" bean1 "github.com/devtron-labs/devtron/pkg/pipeline/bean" @@ -593,7 +594,7 @@ func (handler PipelineConfigRestHandlerImpl) TriggerCiPipeline(w http.ResponseWr //RBAC ENDS response := make(map[string]string) resp, err := handler.ciHandler.HandleCIManual(ciTriggerRequest) - if errors.Is(err, bean1.ErrImagePathInUse) { + if errors.Is(err, pkg.ErrImagePathInUse) { handler.Logger.Errorw("service err duplicate image tag, TriggerCiPipeline", "err", err, "payload", ciTriggerRequest) common.WriteJsonResp(w, err, response, http.StatusConflict) return diff --git a/api/restHandler/app/DeploymentPipelineRestHandler.go b/api/restHandler/app/DeploymentPipelineRestHandler.go index 72d9f311a9..1d614ff2a3 100644 --- a/api/restHandler/app/DeploymentPipelineRestHandler.go +++ b/api/restHandler/app/DeploymentPipelineRestHandler.go @@ -19,7 +19,6 @@ import ( resourceGroup2 "github.com/devtron-labs/devtron/pkg/resourceGroup" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" 
"github.com/devtron-labs/devtron/pkg/user/casbin" - util2 "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" "github.com/gorilla/mux" "go.opentelemetry.io/otel" @@ -929,21 +928,10 @@ func (handler PipelineConfigRestHandlerImpl) GetDeploymentTemplateData(w http.Re common.WriteJsonResp(w, err, "unauthorized user", http.StatusForbidden) return } - - userId, err := handler.userAuthService.GetLoggedInUser(r) - if userId == 0 || err != nil { - handler.Logger.Errorw("request err, userId", "err", err, "payload", userId) - common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) - return - } - isSuperAdmin, _ := handler.userAuthService.IsSuperAdmin(int(userId)) - //RBAC enforcer Ends ctx, cancel := context.WithTimeout(r.Context(), 60*time.Second) - ctx = util2.SetSuperAdminInContext(ctx, isSuperAdmin) defer cancel() - //TODO fix resp, err := handler.deploymentTemplateService.GetDeploymentTemplate(ctx, request) if err != nil { handler.Logger.Errorw("service err, GetEnvConfigOverride", "err", err, "payload", request) @@ -951,6 +939,7 @@ func (handler PipelineConfigRestHandlerImpl) GetDeploymentTemplateData(w http.Re return } common.WriteJsonResp(w, nil, resp, http.StatusOK) + } func (handler PipelineConfigRestHandlerImpl) GetDeploymentTemplate(w http.ResponseWriter, r *http.Request) { diff --git a/api/router/pubsub/ApplicationStatusHandler.go b/api/router/pubsub/ApplicationStatusHandler.go index 94378a3639..cd27a60b6e 100644 --- a/api/router/pubsub/ApplicationStatusHandler.go +++ b/api/router/pubsub/ApplicationStatusHandler.go @@ -22,7 +22,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/devtron-labs/devtron/pkg/app" "time" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" @@ -33,6 +32,7 @@ import ( v1alpha12 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" pubsub 
"github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/appStore/deployment/service" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/go-pg/pg" diff --git a/charts/devtron/Chart.yaml b/charts/devtron/Chart.yaml index ef73f6ed80..4dff6167c4 100644 --- a/charts/devtron/Chart.yaml +++ b/charts/devtron/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: devtron-operator -appVersion: 0.6.23 +appVersion: 0.6.22 description: Chart to configure and install Devtron. Devtron is a Kubernetes Orchestration system. keywords: - Devtron @@ -11,7 +11,7 @@ keywords: - argocd - Hyperion engine: gotpl -version: 0.22.62 +version: 0.22.61 sources: - https://github.com/devtron-labs/charts dependencies: diff --git a/charts/devtron/devtron-bom.yaml b/charts/devtron/devtron-bom.yaml index 0af4a2b095..d106022999 100644 --- a/charts/devtron/devtron-bom.yaml +++ b/charts/devtron/devtron-bom.yaml @@ -9,38 +9,32 @@ global: runAsNonRoot: true installer: - release: "v0.6.23" + release: "v0.6.22" image: "quay.io/devtron/inception" tag: "44b30917-185-13275" components: dashboard: - image: "quay.io/devtron/dashboard:ba04f4f4-325-18824" + image: "quay.io/devtron/dashboard:12717798-325-16265" config: extraConfigs: USE_V2: "true" ENABLE_BUILD_CONTEXT: "true" ENABLE_RESTART_WORKLOAD: "true" HIDE_EXCLUDE_INCLUDE_GIT_COMMITS: "false" - ENABLE_SCOPED_VARIABLES: "true" - ENABLE_CI_JOB: "true" devtron: - image: "quay.io/devtron/hyperion:65577374-280-18804" - cicdImage: "quay.io/devtron/devtron:50ac85e6-434-18829" + image: "quay.io/devtron/hyperion:3c1ba1ad-280-16262" + cicdImage: "quay.io/devtron/devtron:3c1ba1ad-434-16260" customOverrides: - DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:ad3af321-138-18662" + DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:d8d774c3-138-16238" argocdDexServer: image: "ghcr.io/dexidp/dex:v2.30.2" 
initContainer: authenticator: "quay.io/devtron/authenticator:e414faff-393-13273" kubelink: - image: "quay.io/devtron/kubelink:25052130-318-18795" + image: "quay.io/devtron/kubelink:aefc1baf-318-16208" configs: ENABLE_HELM_RELEASE_CACHE: "true" - MANIFEST_FETCH_BATCH_SIZE: "2" - NATS_MSG_PROCESSING_BATCH_SIZE: "1" - NATS_SERVER_HOST: nats://devtron-nats.devtroncd:4222 - RUN_HELM_INSTALL_IN_ASYNC_MODE: "true" PG_ADDR: postgresql-postgresql.devtroncd PG_DATABASE: orchestrator PG_LOG_QUERY: "true" @@ -53,34 +47,7 @@ components: image: "quay.io/devtron/postgres:11.9.0-debian-10-r26" armImage: "quay.io/devtron/postgres:11.9" gitsensor: - image: "quay.io/devtron/git-sensor:b6c3ea0e-200-16327" - imagePullPolicy: "IfNotPresent" - serviceMonitor: - enabled: false - persistence: - volumeSize: 2Gi - configs: - PG_ADDR: postgresql-postgresql.devtroncd - PG_USER: postgres - COMMIT_STATS_TIMEOUT_IN_SEC: "2" - ENABLE_FILE_STATS: "true" - dbconfig: - secretName: postgresql-postgresql - keyName: postgresql-password - lens: - image: "quay.io/devtron/lens:8803028b-333-16178" - imagePullPolicy: IfNotPresent - configs: - GIT_SENSOR_PROTOCOL: GRPC - GIT_SENSOR_URL: git-sensor-service.devtroncd:90 - NATS_SERVER_HOST: nats://devtron-nats.devtroncd:4222 - PG_ADDR: postgresql-postgresql.devtroncd - PG_PORT: "5432" - PG_USER: postgres - PG_DATABASE: lens - dbconfig: - secretName: postgresql-postgresql - keyName: postgresql-password + image: "quay.io/devtron/git-sensor:46b8f0f1-200-16195" migrator: image: "quay.io/devtron/migrator:v4.16.2" envVars: @@ -99,14 +66,13 @@ argo-cd: repository: quay.io/argoproj/argocd tag: "v2.5.2" imagePullPolicy: IfNotPresent - -workflowController: - image: "quay.io/argoproj/workflow-controller:v3.4.3" - executorImage: "quay.io/argoproj/argoexec:v3.4.3" + +lens: + image: "quay.io/devtron/lens:8803028b-333-16178" security: imageScanner: image: "quay.io/devtron/image-scanner:ea03b0af-334-15158" notifier: - image: "quay.io/devtron/notifier:d71bcbcd-372-18717" + 
image: "quay.io/devtron/notifier:d9c72180-372-14306" diff --git a/charts/devtron/templates/NOTES.txt b/charts/devtron/templates/NOTES.txt index 0150709137..ddf0fea2f9 100644 --- a/charts/devtron/templates/NOTES.txt +++ b/charts/devtron/templates/NOTES.txt @@ -55,5 +55,3 @@ Please wait for ~1 minute before running any of the following commands. 2. "Applied" means installation is successful. {{- end }} - -Facing issues? Reach out to our team on Discord https://discord.devtron.ai for immediate assistance! diff --git a/charts/devtron/templates/gitsensor.yaml b/charts/devtron/templates/gitsensor.yaml deleted file mode 100644 index 356dec9ee6..0000000000 --- a/charts/devtron/templates/gitsensor.yaml +++ /dev/null @@ -1,160 +0,0 @@ -{{- if $.Values.installer.modules }} -{{- if has "cicd" $.Values.installer.modules }} -{{- with .Values.components.gitsensor }} -apiVersion: v1 -kind: Secret -metadata: - name: git-sensor-secret - labels: - app: git-sensor - release: devtron -{{- if .secrets }} -data: -{{- range $k, $v := .secrets }} - {{ $k }}: {{ $v | b64enc }} -{{- end }} -{{- end }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: git-sensor-cm - labels: - app: git-sensor - release: devtron -{{- if .configs }} -data: -{{ toYaml .configs | indent 2 }} -{{- end }} - ---- -# Source: gitsensor/templates/generic.yaml -apiVersion: v1 -kind: Service -metadata: - name: git-sensor-service - labels: - app: git-sensor - release: devtron -spec: - ports: - - name: sensor - port: 80 - protocol: TCP - targetPort: 8080 - - name: grpc - port: 90 - protocol: TCP - targetPort: 8081 - selector: - app: git-sensor ---- -# Source: gitsensor/templates/generic.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: git-sensor - labels: - release: devtron - app: git-sensor -spec: - selector: - matchLabels: - app: git-sensor # has to match .spec.template.metadata.labels - serviceName: git-sensor - replicas: 1 # by default is 1 - template: - metadata: - labels: - app: git-sensor - 
spec: - terminationGracePeriodSeconds: 10 - securityContext: - runAsGroup: 1000 - runAsUser: 1000 - initContainers: - - command: - - /bin/sh - - -c - - mkdir -p /git-base/ssh-keys && chown -R devtron:devtron /git-base && chmod 777 /git-base/ssh-keys - image: {{ .image }} - imagePullPolicy: IfNotPresent - name: chown-git-base - resources: {} - securityContext: - runAsUser: 0 - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /git-base/ - name: git-volume - containers: - - name: git-sensor - image: {{ .image }} - {{- if .imagePullPolicy }} - imagePullPolicy: {{ .imagePullPolicy }} - {{- end }} - securityContext: - allowPrivilegeEscalation: false - runAsUser: 1000 - runAsNonRoot: true - ports: - - containerPort: 8080 - name: sensor - - containerPort: 8081 - name: grpc - volumeMounts: - - name: git-volume - mountPath: /git-base/ - env: - - name: DEVTRON_APP_NAME - value: git-sensor - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- if $.Values.components.gitsensor.dbconfig }} - - name: PG_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .dbconfig.secretName }} - key: {{ .dbconfig.keyName }} - {{- end }} - envFrom: - - secretRef: - name: git-sensor-secret - - configMapRef: - name: git-sensor-cm - {{- if .resources }} - resources: - {{- toYaml .resources | nindent 12 }} - {{- end }} - volumeClaimTemplates: - - metadata: - name: git-volume - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: {{ .persistence.volumeSize }} ---- -{{- if .serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: git-sensor-sm - labels: - app: git-sensor - kind: Prometheus - release: devtron -spec: - endpoints: - - port: app - path: /metrics - selector: - matchLabels: - app: git-sensor -{{- end }} -{{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/devtron/templates/lens.yaml 
b/charts/devtron/templates/lens.yaml deleted file mode 100644 index 76328fb7dc..0000000000 --- a/charts/devtron/templates/lens.yaml +++ /dev/null @@ -1,115 +0,0 @@ -{{- if $.Values.installer.modules }} -{{- if has "cicd" $.Values.installer.modules }} -{{- with .Values.components.lens }} -apiVersion: v1 -kind: Secret -metadata: - name: lens-secret - labels: - app: lens - release: devtron -{{- if .secrets }} -data: -{{- range $k, $v := .secrets }} - {{ $k }}: {{ $v | b64enc }} -{{- end }} -{{- end }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: lens-cm - labels: - app: lens - release: devtron -{{- if .configs }} -data: -{{ toYaml .configs | indent 2 }} -{{- end }} ---- -apiVersion: v1 -kind: Service -metadata: - name: lens-service - labels: - app: lens - release: devtron -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: app - protocol: TCP - name: app - selector: - app: lens ---- -# Source: lens/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: lens - labels: - app: lens - release: devtron -spec: - selector: - matchLabels: - app: lens - release: devtron - replicas: 1 - minReadySeconds: 60 - template: - metadata: - labels: - app: lens - release: devtron - spec: - terminationGracePeriodSeconds: 30 - restartPolicy: Always - {{- if and $.Values.global $.Values.global.podSecurityContext }} - securityContext: -{{- toYaml $.Values.global.podSecurityContext | nindent 8 }} - {{- end }} - containers: - - name: lens - image: {{ .image }} - {{- if .imagePullPolicy }} - imagePullPolicy: {{ .imagePullPolicy }} - {{- end }} - {{- if and $.Values.global $.Values.global.containerSecurityContext }} - securityContext: -{{- toYaml $.Values.global.containerSecurityContext | nindent 12 }} - {{- end }} - ports: - - name: app - containerPort: 8080 - protocol: TCP - env: - - name: DEVTRON_APP_NAME - value: lens - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- if .dbconfig }} - - name: PG_PASSWORD - 
valueFrom: - secretKeyRef: - name: {{ .dbconfig.secretName }} - key: {{ .dbconfig.keyName }} - {{- end }} - envFrom: - - configMapRef: - name: lens-cm - - secretRef: - name: lens-secret - {{- if .resources }} - resources: - {{- toYaml .resources | nindent 12 }} - {{- end }} - volumeMounts: [] - revisionHistoryLimit: 3 -{{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/devtron/templates/migrator.yaml b/charts/devtron/templates/migrator.yaml index f663ee208e..6a9b014380 100644 --- a/charts/devtron/templates/migrator.yaml +++ b/charts/devtron/templates/migrator.yaml @@ -270,7 +270,7 @@ spec: - /bin/sh - -c - cp -r sql /shared/ - image: {{ $.Values.components.lens.image }} + image: {{ $.Values.lens.image }} name: init-lens {{- if and $.Values.global $.Values.global.containerSecurityContext }} securityContext: diff --git a/charts/devtron/templates/workflow.yaml b/charts/devtron/templates/workflow.yaml index aeb8b66196..1548e27533 100644 --- a/charts/devtron/templates/workflow.yaml +++ b/charts/devtron/templates/workflow.yaml @@ -35,8 +35,6 @@ kind: CustomResourceDefinition metadata: name: workflows.argoproj.io spec: - conversion: - strategy: None group: argoproj.io names: kind: Workflow @@ -69,12 +67,12 @@ spec: type: object spec: type: object - x-kubernetes-map-type: atomic x-kubernetes-preserve-unknown-fields: true + x-kubernetes-map-type: atomic status: type: object - x-kubernetes-map-type: atomic x-kubernetes-preserve-unknown-fields: true + x-kubernetes-map-type: atomic required: - metadata - spec @@ -88,8 +86,6 @@ kind: CustomResourceDefinition metadata: name: workflowtemplates.argoproj.io spec: - conversion: - strategy: None group: argoproj.io names: kind: WorkflowTemplate @@ -120,799 +116,6 @@ spec: served: true storage: true --- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: workfloweventbindings.argoproj.io -spec: - conversion: - strategy: None - group: argoproj.io - names: - kind: 
WorkflowEventBinding - listKind: WorkflowEventBindingList - plural: workfloweventbindings - shortNames: - - wfeb - singular: workfloweventbinding - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: workflowtasksets.argoproj.io -spec: - conversion: - strategy: None - group: argoproj.io - names: - kind: WorkflowTaskSet - listKind: WorkflowTaskSetList - plural: workflowtasksets - shortNames: - - wfts - singular: workflowtaskset - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - status: - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: workflowtaskresults.argoproj.io -spec: - group: argoproj.io - names: - kind: WorkflowTaskResult - listKind: WorkflowTaskResultList - plural: workflowtaskresults - singular: workflowtaskresult - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - message: - type: string - metadata: - type: object - outputs: - properties: - artifacts: - items: - properties: - archive: - properties: - none: - type: object - tar: - properties: - compressionLevel: - format: int32 - type: integer - type: object - zip: - 
type: object - type: object - archiveLogs: - type: boolean - artifactGC: - properties: - podMetadata: - properties: - annotations: - additionalProperties: - type: string - type: object - labels: - additionalProperties: - type: string - type: object - type: object - serviceAccountName: - type: string - strategy: - enum: - - "" - - OnWorkflowCompletion - - OnWorkflowDeletion - - Never - type: string - type: object - artifactory: - properties: - passwordSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - url: - type: string - usernameSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - required: - - url - type: object - azure: - properties: - accountKeySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - blob: - type: string - container: - type: string - endpoint: - type: string - useSDKCreds: - type: boolean - required: - - blob - - container - - endpoint - type: object - deleted: - type: boolean - from: - type: string - fromExpression: - type: string - gcs: - properties: - bucket: - type: string - key: - type: string - serviceAccountKeySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - required: - - key - type: object - git: - properties: - branch: - type: string - depth: - format: int64 - type: integer - disableSubmodules: - type: boolean - fetch: - items: - type: string - type: array - insecureIgnoreHostKey: - type: boolean - passwordSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - repo: - type: string - revision: - type: string - singleBranch: - type: boolean - sshPrivateKeySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - 
required: - - key - type: object - usernameSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - required: - - repo - type: object - globalName: - type: string - hdfs: - properties: - addresses: - items: - type: string - type: array - force: - type: boolean - hdfsUser: - type: string - krbCCacheSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - krbConfigConfigMap: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - krbKeytabSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - krbRealm: - type: string - krbServicePrincipalName: - type: string - krbUsername: - type: string - path: - type: string - required: - - path - type: object - http: - properties: - auth: - properties: - basicAuth: - properties: - passwordSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - usernameSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - clientCert: - properties: - clientCertSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - clientKeySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - oauth2: - properties: - clientIDSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - clientSecretSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - endpointParams: - items: - properties: - key: - 
type: string - value: - type: string - required: - - key - type: object - type: array - scopes: - items: - type: string - type: array - tokenURLSecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - type: object - headers: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - url: - type: string - required: - - url - type: object - mode: - format: int32 - type: integer - name: - type: string - optional: - type: boolean - oss: - properties: - accessKeySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - bucket: - type: string - createBucketIfNotPresent: - type: boolean - endpoint: - type: string - key: - type: string - lifecycleRule: - properties: - markDeletionAfterDays: - format: int32 - type: integer - markInfrequentAccessAfterDays: - format: int32 - type: integer - type: object - secretKeySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - securityToken: - type: string - required: - - key - type: object - path: - type: string - raw: - properties: - data: - type: string - required: - - data - type: object - recurseMode: - type: boolean - s3: - properties: - accessKeySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - bucket: - type: string - createBucketIfNotPresent: - properties: - objectLocking: - type: boolean - type: object - encryptionOptions: - properties: - enableEncryption: - type: boolean - kmsEncryptionContext: - type: string - kmsKeyId: - type: string - serverSideCustomerKeySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - endpoint: - type: string - 
insecure: - type: boolean - key: - type: string - region: - type: string - roleARN: - type: string - secretKeySecret: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - useSDKCreds: - type: boolean - type: object - subPath: - type: string - required: - - name - type: object - type: array - exitCode: - type: string - parameters: - items: - properties: - default: - type: string - description: - type: string - enum: - items: - type: string - type: array - globalName: - type: string - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - default: - type: string - event: - type: string - expression: - type: string - jqFilter: - type: string - jsonPath: - type: string - parameter: - type: string - path: - type: string - supplied: - type: object - type: object - required: - - name - type: object - type: array - result: - type: string - type: object - phase: - type: string - progress: - type: string - required: - - metadata - type: object - served: true - storage: true ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: workflowartifactgctasks.argoproj.io -spec: - conversion: - strategy: None - group: argoproj.io - names: - kind: WorkflowArtifactGCTask - listKind: WorkflowArtifactGCTaskList - plural: workflowartifactgctasks - shortNames: - - wfat - singular: workflowartifactgctask - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - status: - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - 
served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: cronworkflows.argoproj.io -spec: - conversion: - strategy: None - group: argoproj.io - names: - kind: CronWorkflow - listKind: CronWorkflowList - plural: cronworkflows - shortNames: - - cwf - - cronwf - singular: cronworkflow - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - status: - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: clusterworkflowtemplates.argoproj.io -spec: - conversion: - strategy: None - group: argoproj.io - names: - kind: ClusterWorkflowTemplate - listKind: ClusterWorkflowTemplateList - plural: clusterworkflowtemplates - shortNames: - - clusterwftmpl - - cwft - singular: clusterworkflowtemplate - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - type: object - x-kubernetes-map-type: atomic - x-kubernetes-preserve-unknown-fields: true - required: - - metadata - - spec - type: object - served: true - storage: true ---- apiVersion: v1 kind: ServiceAccount metadata: @@ -1025,6 +228,15 @@ rules: - update - patch - delete +- apiGroups: + - argoproj.io + resources: + - workflowtemplates + - workflowtemplates/finalizers + verbs: + - get + - list + - watch - apiGroups: - "" resources: @@ -1037,10 +249,8 @@ rules: - "" resources: - persistentvolumeclaims - - persistentvolumeclaims/finalizers verbs: - create - - update - delete - get - apiGroups: @@ 
-1048,9 +258,6 @@ rules: resources: - workflows - workflows/finalizers - - workflowtasksets - - workflowtasksets/finalizers - - workflowartifactgctasks verbs: - get - list @@ -1058,27 +265,15 @@ rules: - update - patch - delete - - create - apiGroups: - argoproj.io resources: - workflowtemplates - workflowtemplates/finalizers - - clusterworkflowtemplates - - clusterworkflowtemplates/finalizers verbs: - get - list - watch -- apiGroups: - - argoproj.io - resources: - - workflowtaskresults - - workflowtaskresults/finalizers - verbs: - - list - - watch - - deletecollection - apiGroups: - "" resources: @@ -1146,9 +341,7 @@ data: parallelism: 50 artifactRepository: archiveLogs: false - {{- if not $.Values.workflowController.IMDSv2Enforced }} containerRuntimeExecutor: pns - {{- end }} executor: imagePullPolicy: Always kind: ConfigMap @@ -1175,11 +368,7 @@ spec: - --configmap - workflow-controller-configmap - --executor-image - {{- if $.Values.workflowController.IMDSv2Enforced }} - {{ $.Values.workflowController.executorImage }} - {{- else }} - - quay.io/argoproj/argoexec:v3.0.7 - {{- end }} command: - workflow-controller env: @@ -1188,15 +377,7 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.name - {{- if $.Values.workflowController.IMDSv2Enforced }} - - name: POD_NAMES - value: v1 - {{- end }} - {{- if $.Values.workflowController.IMDSv2Enforced }} image: {{ $.Values.workflowController.image }} - {{- else }} - image: quay.io/argoproj/workflow-controller:v3.0.7 - {{- end }} name: workflow-controller {{- if $.Values.workflowController.resources }} resources: @@ -1204,4 +385,4 @@ spec: {{- end }} serviceAccountName: argo {{- end }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/devtron/values.yaml b/charts/devtron/values.yaml index c9da3f93e6..6ff484957c 100644 --- a/charts/devtron/values.yaml +++ b/charts/devtron/values.yaml @@ -10,7 +10,7 @@ global: installer: repo: "devtron-labs/devtron" # For Kubernetes version < 1.16, set release: legacy. 
You won't be able to upgrade Devtron unless you upgrade the K8s version to 1.16 or above. - release: "v0.6.23" #You can use a branch name or a release tag name as a release, for gitee as source only "main" is supported as of now + release: "v0.6.22" #You can use a branch name or a release tag name as a release, for gitee as source only "main" is supported as of now image: quay.io/devtron/inception tag: 44b30917-185-13275 source: "github" # Available options are github and gitee @@ -55,17 +55,15 @@ components: ENABLE_BUILD_CONTEXT: "true" ENABLE_RESTART_WORKLOAD: "true" HIDE_EXCLUDE_INCLUDE_GIT_COMMITS: "false" - ENABLE_SCOPED_VARIABLES: "true" - ENABLE_CI_JOB: "true" - image: "quay.io/devtron/dashboard:ba04f4f4-325-18824" + image: "quay.io/devtron/dashboard:12717798-325-16265" imagePullPolicy: IfNotPresent devtron: - image: "quay.io/devtron/hyperion:65577374-280-18804" - cicdImage: "quay.io/devtron/devtron:50ac85e6-434-18829" + image: "quay.io/devtron/hyperion:3c1ba1ad-280-16262" + cicdImage: "quay.io/devtron/devtron:3c1ba1ad-434-16260" imagePullPolicy: IfNotPresent customOverrides: - DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:ad3af321-138-18662" + DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:d8d774c3-138-16238" serviceMonitor: enabled: false service: @@ -95,14 +93,10 @@ components: authenticator: "quay.io/devtron/authenticator:e414faff-393-13273" kubelink: - image: "quay.io/devtron/kubelink:25052130-318-18795" + image: "quay.io/devtron/kubelink:aefc1baf-318-16208" imagePullPolicy: IfNotPresent configs: ENABLE_HELM_RELEASE_CACHE: "true" - MANIFEST_FETCH_BATCH_SIZE: "2" - NATS_MSG_PROCESSING_BATCH_SIZE: "1" - NATS_SERVER_HOST: nats://devtron-nats.devtroncd:4222 - RUN_HELM_INSTALL_IN_ASYNC_MODE: "true" PG_ADDR: postgresql-postgresql.devtroncd PG_DATABASE: orchestrator PG_LOG_QUERY: "true" @@ -119,34 +113,7 @@ components: persistence: volumeSize: "20Gi" gitsensor: - image: "quay.io/devtron/git-sensor:b6c3ea0e-200-16327" - imagePullPolicy: "IfNotPresent" - 
serviceMonitor: - enabled: false - persistence: - volumeSize: 2Gi - configs: - PG_ADDR: postgresql-postgresql.devtroncd - PG_USER: postgres - COMMIT_STATS_TIMEOUT_IN_SEC: "2" - ENABLE_FILE_STATS: "true" - dbconfig: - secretName: postgresql-postgresql - keyName: postgresql-password - lens: - image: "quay.io/devtron/lens:8803028b-333-16178" - imagePullPolicy: IfNotPresent - configs: - GIT_SENSOR_PROTOCOL: GRPC - GIT_SENSOR_URL: git-sensor-service.devtroncd:90 - NATS_SERVER_HOST: nats://devtron-nats.devtroncd:4222 - PG_ADDR: postgresql-postgresql.devtroncd - PG_PORT: "5432" - PG_USER: postgres - PG_DATABASE: lens - dbconfig: - secretName: postgresql-postgresql - keyName: postgresql-password + image: "quay.io/devtron/git-sensor:46b8f0f1-200-16195" migrator: image: "quay.io/devtron/migrator:v4.16.2" envVars: @@ -327,7 +294,7 @@ security: notifier: enabled: false imagePullPolicy: IfNotPresent - image: "quay.io/devtron/notifier:d71bcbcd-372-18717" + image: "quay.io/devtron/notifier:d9c72180-372-14306" configs: CD_ENVIRONMENT: PROD DB: orchestrator @@ -351,11 +318,11 @@ minio: storage: "50Gi" # Change below values for workflow controller workflowController: - # Set this to true if you have IMDSv2 enforced or IMDSv1 and v2 on your AWS EKS cluster and false if you are using IMDSv1 with token hop limit set to 1 - IMDSv2Enforced: true - image: "quay.io/argoproj/workflow-controller:v3.4.3" - executorImage: "quay.io/argoproj/argoexec:v3.4.3" + image: "quay.io/argoproj/workflow-controller:v3.0.7" + executorImage: "quay.io/argoproj/argoexec:v3.0.7" +lens: + image: "quay.io/devtron/lens:8803028b-333-16178" # Values for grafana integration monitoring: grafana: diff --git a/client/argocdServer/ArgoClientWrapperService.go b/client/argocdServer/ArgoClientWrapperService.go deleted file mode 100644 index 93682ee20a..0000000000 --- a/client/argocdServer/ArgoClientWrapperService.go +++ /dev/null @@ -1,39 +0,0 @@ -package argocdServer - -import ( - "context" - application2 
"github.com/argoproj/argo-cd/v2/pkg/apiclient/application" - "github.com/devtron-labs/devtron/client/argocdServer/application" - "github.com/devtron-labs/devtron/client/argocdServer/bean" - "go.uber.org/zap" -) - -type ArgoClientWrapperService interface { - GetArgoAppWithNormalRefresh(context context.Context, argoAppName string) error -} - -type ArgoClientWrapperServiceImpl struct { - logger *zap.SugaredLogger - acdClient application.ServiceClient -} - -func NewArgoClientWrapperServiceImpl(logger *zap.SugaredLogger, - acdClient application.ServiceClient, -) *ArgoClientWrapperServiceImpl { - return &ArgoClientWrapperServiceImpl{ - logger: logger, - acdClient: acdClient, - } -} - -func (impl *ArgoClientWrapperServiceImpl) GetArgoAppWithNormalRefresh(context context.Context, argoAppName string) error { - refreshType := bean.RefreshTypeNormal - impl.logger.Debugw("trying to normal refresh application through get ", "argoAppName", argoAppName) - _, err := impl.acdClient.Get(context, &application2.ApplicationQuery{Name: &argoAppName, Refresh: &refreshType}) - if err != nil { - impl.logger.Errorw("cannot get application with refresh", "app", argoAppName) - return err - } - impl.logger.Debugw("done getting the application with refresh with no error", "argoAppName", argoAppName) - return nil -} diff --git a/client/argocdServer/connection/Config.go b/client/argocdServer/Config.go similarity index 97% rename from client/argocdServer/connection/Config.go rename to client/argocdServer/Config.go index 54805c1cbd..d0f1333557 100644 --- a/client/argocdServer/connection/Config.go +++ b/client/argocdServer/Config.go @@ -15,7 +15,7 @@ * */ -package connection +package argocdServer import ( "github.com/caarlos0/env" diff --git a/client/argocdServer/connection/Connection.go b/client/argocdServer/Connection.go similarity index 99% rename from client/argocdServer/connection/Connection.go rename to 
client/argocdServer/Connection.go index 56e77c776f..c325268529 100644 --- a/client/argocdServer/connection/Connection.go +++ b/client/argocdServer/Connection.go @@ -15,7 +15,7 @@ * */ -package connection +package argocdServer import ( "context" diff --git a/client/argocdServer/connection/Tls.go b/client/argocdServer/Tls.go similarity index 99% rename from client/argocdServer/connection/Tls.go rename to client/argocdServer/Tls.go index a9d11826de..ab22560bba 100644 --- a/client/argocdServer/connection/Tls.go +++ b/client/argocdServer/Tls.go @@ -15,7 +15,7 @@ * */ -package connection +package argocdServer import ( "crypto/ecdsa" diff --git a/client/argocdServer/connection/Token.go b/client/argocdServer/Token.go similarity index 98% rename from client/argocdServer/connection/Token.go rename to client/argocdServer/Token.go index 437d3fd1d2..fb194aa50e 100644 --- a/client/argocdServer/connection/Token.go +++ b/client/argocdServer/Token.go @@ -15,7 +15,7 @@ * */ -package connection +package argocdServer import "context" diff --git a/client/argocdServer/Version.go b/client/argocdServer/Version.go index 6bea985554..ac1c4f3828 100644 --- a/client/argocdServer/Version.go +++ b/client/argocdServer/Version.go @@ -20,7 +20,6 @@ package argocdServer import ( "context" "github.com/argoproj/argo-cd/v2/pkg/apiclient/version" - "github.com/devtron-labs/devtron/client/argocdServer/connection" "github.com/golang/protobuf/ptypes/empty" "go.uber.org/zap" ) @@ -32,10 +31,10 @@ type VersionService interface { type VersionServiceImpl struct { logger *zap.SugaredLogger - argoCDConnectionManager connection.ArgoCDConnectionManager + argoCDConnectionManager ArgoCDConnectionManager } -func NewVersionServiceImpl(logger *zap.SugaredLogger, argoCDConnectionManager connection.ArgoCDConnectionManager) *VersionServiceImpl { +func NewVersionServiceImpl(logger *zap.SugaredLogger, argoCDConnectionManager ArgoCDConnectionManager) *VersionServiceImpl { 
return &VersionServiceImpl{logger: logger, argoCDConnectionManager: argoCDConnectionManager} } diff --git a/client/argocdServer/application/Application.go b/client/argocdServer/application/Application.go index d35fa8b0be..5206cf5f77 100644 --- a/client/argocdServer/application/Application.go +++ b/client/argocdServer/application/Application.go @@ -23,13 +23,13 @@ import ( "errors" "fmt" "github.com/devtron-labs/devtron/api/restHandler/bean" - "github.com/devtron-labs/devtron/client/argocdServer/connection" "strings" "time" "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/util" "go.uber.org/zap" "google.golang.org/grpc" @@ -116,11 +116,11 @@ type Manifests struct { type ServiceClientImpl struct { logger *zap.SugaredLogger - argoCDConnectionManager connection.ArgoCDConnectionManager + argoCDConnectionManager argocdServer.ArgoCDConnectionManager } func NewApplicationClientImpl( - logger *zap.SugaredLogger, argoCDConnectionManager connection.ArgoCDConnectionManager, + logger *zap.SugaredLogger, argoCDConnectionManager argocdServer.ArgoCDConnectionManager, ) *ServiceClientImpl { return &ServiceClientImpl{ logger: logger, diff --git a/client/argocdServer/bean/bean.go b/client/argocdServer/bean/bean.go deleted file mode 100644 index 1a75a5c204..0000000000 --- a/client/argocdServer/bean/bean.go +++ /dev/null @@ -1,3 +0,0 @@ -package bean - -const RefreshTypeNormal = "normal" diff --git a/client/argocdServer/cluster/Cluster.go b/client/argocdServer/cluster/Cluster.go index b9e1f0fc5f..5658f4760c 100644 --- a/client/argocdServer/cluster/Cluster.go +++ b/client/argocdServer/cluster/Cluster.go @@ -22,7 +22,7 @@ import ( "errors" 
"github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - "github.com/devtron-labs/devtron/client/argocdServer/connection" + "github.com/devtron-labs/devtron/client/argocdServer" "go.uber.org/zap" "time" ) @@ -44,10 +44,10 @@ type ServiceClient interface { type ServiceClientImpl struct { logger *zap.SugaredLogger - argoCdConnection connection.ArgoCDConnectionManager + argoCdConnection argocdServer.ArgoCDConnectionManager } -func NewServiceClientImpl(logger *zap.SugaredLogger, argoCdConnection connection.ArgoCDConnectionManager) *ServiceClientImpl { +func NewServiceClientImpl(logger *zap.SugaredLogger, argoCdConnection argocdServer.ArgoCDConnectionManager) *ServiceClientImpl { return &ServiceClientImpl{ logger: logger, argoCdConnection: argoCdConnection, diff --git a/client/argocdServer/connection/proxy.go b/client/argocdServer/proxy.go similarity index 99% rename from client/argocdServer/connection/proxy.go rename to client/argocdServer/proxy.go index 54f44c5a51..1e66d3dcef 100644 --- a/client/argocdServer/connection/proxy.go +++ b/client/argocdServer/proxy.go @@ -15,7 +15,7 @@ * */ -package connection +package argocdServer import ( "bytes" diff --git a/client/argocdServer/connection/proxy_test.go b/client/argocdServer/proxy_test.go similarity index 98% rename from client/argocdServer/connection/proxy_test.go rename to client/argocdServer/proxy_test.go index bc84026e13..a1f4be9b7b 100644 --- a/client/argocdServer/connection/proxy_test.go +++ b/client/argocdServer/proxy_test.go @@ -1,4 +1,4 @@ -package connection +package argocdServer import "testing" diff --git a/client/argocdServer/repository/Repository.go b/client/argocdServer/repository/Repository.go index 7b5013f3f3..19930c6806 100644 --- a/client/argocdServer/repository/Repository.go +++ b/client/argocdServer/repository/Repository.go @@ -23,8 +23,8 @@ import ( repository2 
"github.com/argoproj/argo-cd/v2/pkg/apiclient/repository" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" "github.com/argoproj/argo-cd/v2/reposerver/apiclient" + "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/client/argocdServer/application" - "github.com/devtron-labs/devtron/client/argocdServer/connection" "go.uber.org/zap" ) @@ -45,10 +45,10 @@ type ServiceClient interface { type ServiceClientImpl struct { logger *zap.SugaredLogger - argoCDConnectionManager connection.ArgoCDConnectionManager + argoCDConnectionManager argocdServer.ArgoCDConnectionManager } -func NewServiceClientImpl(logger *zap.SugaredLogger, argoCDConnectionManager connection.ArgoCDConnectionManager) *ServiceClientImpl { +func NewServiceClientImpl(logger *zap.SugaredLogger, argoCDConnectionManager argocdServer.ArgoCDConnectionManager) *ServiceClientImpl { return &ServiceClientImpl{ logger: logger, argoCDConnectionManager: argoCDConnectionManager, diff --git a/client/argocdServer/session/Session.go b/client/argocdServer/session/Session.go index e5de2470f5..f4b041e0d7 100644 --- a/client/argocdServer/session/Session.go +++ b/client/argocdServer/session/Session.go @@ -20,7 +20,7 @@ package session import ( "context" "github.com/argoproj/argo-cd/v2/pkg/apiclient/session" - "github.com/devtron-labs/devtron/client/argocdServer/connection" + "github.com/devtron-labs/devtron/client/argocdServer" "time" ) @@ -32,7 +32,7 @@ type ServiceClientImpl struct { ssc session.SessionServiceClient } -func NewSessionServiceClient(argoCDConnectionManager connection.ArgoCDConnectionManager) *ServiceClientImpl { +func NewSessionServiceClient(argoCDConnectionManager argocdServer.ArgoCDConnectionManager) *ServiceClientImpl { // this function only called when gitops configured and user ask for creating acd token conn 
:= argoCDConnectionManager.GetConnection("") ssc := session.NewSessionServiceClient(conn) diff --git a/cmd/external-app/wire.go b/cmd/external-app/wire.go index d4b68e7b92..38fd34b681 100644 --- a/cmd/external-app/wire.go +++ b/cmd/external-app/wire.go @@ -40,7 +40,6 @@ import ( "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/app" appStoreBean "github.com/devtron-labs/devtron/pkg/appStore/bean" - repository3 "github.com/devtron-labs/devtron/pkg/appStore/deployment/repository" appStoreDeploymentTool "github.com/devtron-labs/devtron/pkg/appStore/deployment/tool" appStoreDeploymentGitopsTool "github.com/devtron-labs/devtron/pkg/appStore/deployment/tool/gitops" "github.com/devtron-labs/devtron/pkg/attributes" @@ -200,12 +199,6 @@ func InitializeApp() (*App, error) { wire.Bind(new(dockerRegistryRepository.DockerRegistryIpsConfigRepository), new(*dockerRegistryRepository.DockerRegistryIpsConfigRepositoryImpl)), dockerRegistryRepository.NewOCIRegistryConfigRepositoryImpl, wire.Bind(new(dockerRegistryRepository.OCIRegistryConfigRepository), new(*dockerRegistryRepository.OCIRegistryConfigRepositoryImpl)), - - // chart group repository layer wire injection started - repository3.NewChartGroupDeploymentRepositoryImpl, - wire.Bind(new(repository3.ChartGroupDeploymentRepository), new(*repository3.ChartGroupDeploymentRepositoryImpl)), - // chart group repository layer wire injection ended - // end: docker registry wire set injection ) return &App{}, nil diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index b48223587e..959fbf6239 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -274,7 +274,6 @@ func InitializeApp() (*App, error) { appStoreValuesServiceImpl := service2.NewAppStoreValuesServiceImpl(sugaredLogger, appStoreApplicationVersionRepositoryImpl, 
installedAppRepositoryImpl, appStoreVersionValuesRepositoryImpl, userServiceImpl) appStoreValuesRestHandlerImpl := appStoreValues.NewAppStoreValuesRestHandlerImpl(sugaredLogger, userServiceImpl, appStoreValuesServiceImpl) appStoreValuesRouterImpl := appStoreValues.NewAppStoreValuesRouterImpl(appStoreValuesRestHandlerImpl) - chartGroupDeploymentRepositoryImpl := repository4.NewChartGroupDeploymentRepositoryImpl(db, sugaredLogger) clusterInstalledAppsRepositoryImpl := repository4.NewClusterInstalledAppsRepositoryImpl(db, sugaredLogger) appStoreDeploymentHelmServiceImpl := appStoreDeploymentTool.NewAppStoreDeploymentHelmServiceImpl(sugaredLogger, helmAppServiceImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, helmAppClientImpl, installedAppRepositoryImpl, appStoreDeploymentCommonServiceImpl, ociRegistryConfigRepositoryImpl) installedAppVersionHistoryRepositoryImpl := repository4.NewInstalledAppVersionHistoryRepositoryImpl(sugaredLogger, db) @@ -283,7 +282,7 @@ func InitializeApp() (*App, error) { return nil, err } pubSubClientServiceImpl := pubsub_lib.NewPubSubClientServiceImpl(sugaredLogger) - appStoreDeploymentServiceImpl := service3.NewAppStoreDeploymentServiceImpl(sugaredLogger, installedAppRepositoryImpl, chartGroupDeploymentRepositoryImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, clusterInstalledAppsRepositoryImpl, appRepositoryImpl, appStoreDeploymentHelmServiceImpl, appStoreDeploymentHelmServiceImpl, environmentServiceImpl, clusterServiceImpl, helmAppServiceImpl, appStoreDeploymentCommonServiceImpl, globalEnvVariables, installedAppVersionHistoryRepositoryImpl, gitOpsConfigRepositoryImpl, attributesServiceImpl, deploymentServiceTypeConfig, chartTemplateServiceImpl, pubSubClientServiceImpl) + appStoreDeploymentServiceImpl := service3.NewAppStoreDeploymentServiceImpl(sugaredLogger, installedAppRepositoryImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, 
clusterInstalledAppsRepositoryImpl, appRepositoryImpl, appStoreDeploymentHelmServiceImpl, appStoreDeploymentHelmServiceImpl, environmentServiceImpl, clusterServiceImpl, helmAppServiceImpl, appStoreDeploymentCommonServiceImpl, globalEnvVariables, installedAppVersionHistoryRepositoryImpl, gitOpsConfigRepositoryImpl, attributesServiceImpl, deploymentServiceTypeConfig, chartTemplateServiceImpl, pubSubClientServiceImpl) appStoreDeploymentRestHandlerImpl := appStoreDeployment.NewAppStoreDeploymentRestHandlerImpl(sugaredLogger, userServiceImpl, enforcerImpl, enforcerUtilImpl, enforcerUtilHelmImpl, appStoreDeploymentServiceImpl, validate, helmAppServiceImpl, appStoreDeploymentCommonServiceImpl, helmUserServiceImpl, attributesServiceImpl) appStoreDeploymentRouterImpl := appStoreDeployment.NewAppStoreDeploymentRouterImpl(appStoreDeploymentRestHandlerImpl) chartProviderServiceImpl := chartProvider.NewChartProviderServiceImpl(sugaredLogger, chartRepoRepositoryImpl, chartRepositoryServiceImpl, dockerArtifactStoreRepositoryImpl, ociRegistryConfigRepositoryImpl) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index fb6df44e1e..8e3953c8e6 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -114,7 +114,7 @@ * [Connect SpringBoot with Mysql Database](user-guide/use-cases/connect-springboot-with-mysql-database.md) * [Connect Expressjs With Mongodb Database](user-guide/use-cases/connect-expressjs-with-mongodb-database.md) * [Connect Django With Mysql Database](user-guide/use-cases/connect-django-with-mysql-database.md) - * [Pull Helm Charts from OCI Registry](user-guide/use-cases/oci-pull.md) * [Telemetry Overview](user-guide/telemetry.md) * [Devtron on Graviton](reference/graviton.md) -* [Release Notes](https://github.com/devtron-labs/devtron/releases) \ No newline at end of file +* [Release Notes](https://github.com/devtron-labs/devtron/releases) + diff --git a/docs/reference/glossary.md b/docs/reference/glossary.md index 16a82760a6..cd2c294503 
100644 --- a/docs/reference/glossary.md +++ b/docs/reference/glossary.md @@ -50,11 +50,9 @@ Kubernetes objects used to store configuration data as key-value pairs. They all You can use different ConfigMaps for respective environments too. [Read More...](https://docs.devtron.ai/usage/applications/creating-application/config-maps) -### Container/OCI Registry +### Container Registry -It is a collection of repositories that store container images. It allows developers to store, share, and manage images used to deploy containers. In Devtron, you can add a container registry by going to Global Configurations → Container / OCI Registry. Your CI images are pushed to the container registry you configure. [Read More...](https://docs.devtron.ai/global-configurations/container-registries). - -An OCI-compliant registry can also store artifacts (such as helm charts). Here, OCI stands for Open Container Initiative. It is an open industry standard for container formats and registries. +It is a collection of repositories that store container images. It allows developers to store, share, and manage images used to deploy containers. In Devtron, you can add a container registry by going to Global Configurations → Container / OCI Registry. Your CI images are pushed to the container registry you configure. 
[Read More...](https://docs.devtron.ai/global-configurations/docker-registries) ### Cordoning diff --git a/docs/user-guide/creating-application/git-material.md b/docs/user-guide/creating-application/git-material.md index a322a15bf0..14d5e43ec0 100644 --- a/docs/user-guide/creating-application/git-material.md +++ b/docs/user-guide/creating-application/git-material.md @@ -1,144 +1,67 @@ # Git Repository -## Introduction +**Please configure Global configurations > Git Accounts to configure Git Repository is using private repo** -During the [CI process](https://docs.devtron.ai/usage/applications/deploying-application/triggering-ci), the application source code is pulled from your [git repository](https://docs.devtron.ai/resources/glossary#repo). +Git Repository is used to pull your application source code during the CI step. Select `Git Repository` section of the `App Configuration`. Inside `Git Repository` when you click on `Add Git Repository` you will see three options as shown below: -Devtron also supports multiple Git repositories (be it from one Git account or multiple Git accounts) in a single deployment. +1. Git Account +2. Git Repo URL +3. Checkout Path -![Figure 1: Adding Git Repository](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/add-git-repo.jpg) +Devtron also supports multiple git repositories in a single deployment. We will discuss this in detail in the multi git option [below](#5-multi-git). -Therefore, this doc is divided into 2 sections, read the one that caters to your application: -* [Single Repo Application](#single-repo-application) -* [Multi Repo Application](#multi-repo-application) +![](../../.gitbook/assets/create-app-git-account.gif) ---- +## 1. Git Account -## Single Repo Application +In this section, you have to select the git account of your code repository. If the authentication type of the Git account is anonymous, only public git repository will be accessible. 
If you are using a private git repository, you can configure your git provider via [git accounts](../global-configurations/git-accounts.md). -Follow the below steps if the source code of your application is hosted on a single Git repository. +## 2. Git Repo URL -In your application, go to **App Configuration** → **Git Repository**. You will get the following fields and options: +Inside the git repo URL, you have to provide your code repository’s URL. For Example- [https://github.com/devtron-labs/django-repo](https://github.com/devtron-labs/django-repo) -1. [Git Account](#git-account) -2. [Git Repo URL](#git-repo-url) -3. (Checkboxes) - * [Exclude specific file/folder in this repo](#exclude-specific-filefolder-in-this-repo) - * [Set clone directory](#set-clone-directory) - * [Pull submodules recursively](#pull-submodules-recursively) +You can find this URL by clicking on the '⤓ code' button on your git repository page. -### Git Account +Note: +* Copy the HTTPS/SSH url of the repository +* Please make sure that you've added your [dockerfile](https://docs.docker.com/engine/reference/builder/) in the repo. -This is a dropdown that shows the list of Git accounts added to your organization on Devtron. If you haven't done already, we recommend you to first [add your Git account](https://docs.devtron.ai/global-configurations/git-accounts) (especially when the repository is private). +![](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/git-repo-1.jpg) -![Figure 2: Selecting Git Account](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/select-git-account.jpg) -{% hint style="info" %} -If the authentication type of your Git account is anonymous, only public Git repositories in that account will be accessible. Whereas, adding a user auth or SSH key will make both public and private repositories accessible. -{% endhint %} +## 3. 
Checkout Path +After clicking on checkbox, git checkout path field appears. The git checkout path is the directory where your code is pulled or cloned for the repository you specified in the previous step. -### Git Repo URL +This field is optional in case of a single git repository application and you can leave the path as default. Devtron assigns a directory by itself when the field is left blank. The default value of this field is `./` -In this field, you have to provide your code repository’s URL, for e.g., `https://github.com/devtron-labs/django-repo`. +![](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/save-git-repo.jpg) -You can find this URL by clicking on the **Code** button available on your repository page as shown below: +If you want to go with a multi-git approach, then you need to specify a separate path for each of your repositories. The first repository can be checked out at the default `./` path as explained above. But, for all the rest of the repositories, you need to ensure that you provide unique checkout paths. In failing to do so, you may cause Devtron to checkout multiple repositories in one directory and overwriting files from different repositories on each other. -![Figure 3: Getting Repo URL](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/repo-url.jpg) +## 4. Pull Modules Recursively: -{% hint style="info" %} -* Copy the HTTPS/SSH portion of the URL too -* Make sure you've added your [Dockerfile](https://docs.docker.com/engine/reference/builder/) in the repo -{% endhint %} +This checkbox is optional and is used for pulling [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) present in a repo. The submodules will be pulled recursively and same auth method which is used for parent repo will be used for submodules. +## 5. 
Multi Git: -### Exclude specific file/folder in this repo +As we discussed, Devtron also supports multiple git repositories in a single application. To add multiple repositories, click on add repo and repeat steps 1 to 3. Repeat the process for every new git repository you add. Ensure that the checkout paths are unique for each. -Not all repository changes are worth triggering a new [CI build](https://docs.devtron.ai/usage/applications/deploying-application/triggering-ci). If you enable this checkbox, you can define the file(s) or folder(s) whose commits you wish to use in the CI build. +Note: Even if you add multiple repositories, only one image will be created based on the docker file as shown in the [docker build config](docker-build-configuration.md). -![Figure 4: Sample Exclusion Rule](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/sample1.jpg) +## **Why do we need Multi Git support-** -In other words, if a given commit contains changes only in file(s) present in your exclusion rule, the commit won't show up while selecting the [Git material](https://docs.devtron.ai/resources/glossary#material), which means it will not be eligible for build. However, if a given commit contains changes in other files too (along with the excluded file), the commit won't be excluded and it will definitely show up in the list of commits. - -![Figure 5: Excludes commits made to README.md](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/excluded-commit.jpg) - -Devtron allows you to create either an exclusion rule, an inclusion rule, or a combination of both. In case of multiple files or folders, you can list them in new lines. - -To exclude a path, use **!** as the prefix, e.g. `!path/to/file`
-To include a path, don't use any prefix, e.g. `path/to/file` - - -#### Examples - - -| Sample Values | Description | -|---|---| -| `!README.md` | **Exclusion of a single file in root folder:**
Commits containing changes made only in README.md file will not be shown | -| `!README.md`
`!index.js` | **Exclusion of multiple files in root folder:**
Commits containing changes made only in README.md or/and index.js files will not be shown | -| `README.md` | **Inclusion of a single file in root folder:**
Commits containing changes made only in README.md file will be shown. Rest all will be excluded. | -| `!src/extensions/printer/code2.py` | **Exclusion of a single file in a folder tree:**
Commits containing changes made specifically to code2.py file will not be shown | -| `!src/*` | **Exclusion of a single folder and all its files:**
Commits containing changes made specifically to files within src folder will not be shown | -| `!README.md`
`index.js` | **Exclusion and inclusion of files:**
Commits containing changes made only in README.md will not be shown, but commits made in index.js file will be shown. All other commits apart from the aforementioned files will be excluded. | -| `!README.md`
`README.md` | **Exclusion and inclusion of conflicting files:**
If conflicting paths are defined in the rule, the one defined later will be considered. In this case, commits containing changes made only in README.md will be shown. | - - -You may use the **Learn how** link (as shown below) to understand the syntax of defining an exclusion or inclusion rule. - -![Figure 6: 'Learn how' Button](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/rules.jpg) - -Since file paths can be long, Devtron supports regex too for writing the paths. To understand it better, you may click the **How to use** link as shown below. - -![Figure 7: Regex Support](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/regex-help.jpg) - -#### How to view excluded commits? - -As we saw earlier in fig. 4 and 5, commits containing the changes of only `README.md` file were not displayed, since the file was in the exclusion list. - -However, Devtron gives you the option to view the excluded commits too. There's a döner menu at the top-right (beside the `Search by commit hash` search bar). - -![Figure 8a: Döner Menu Icon](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/doner-menu.jpg) - -![Figure 8b: Show Excluded Commits](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/show-exclusions.jpg) - -![Figure 8c: Commits Unavailable for Build](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/excluded-commits.jpg) - -The **EXCLUDED** label (in red) indicates that the commits contain changes made only to the excluded file, and hence they are unavailable for build. - - -### Set clone directory - -After clicking the checkbox, a field titled `clone directory path` appears. It is the directory where your code will be cloned for the repository you specified in the previous step. 
- -This field is optional for a single Git repository application and you can leave the path as default. Devtron assigns a directory by itself when the field is left blank. The default value of this field is `./` - -![Figure 8: Clone Directory Option](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/clone-directory.jpg) - - -### Pull submodules recursively - -This checkbox is optional and is used for pulling [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) present in a repo. The submodules will be pulled recursively, and the auth method used for the parent repo will be used for submodules too. - ---- - -## Multi Repo Application - -As discussed earlier, Devtron also supports multiple git repositories in a single application. To add multiple repositories, click **Add Git Repository** and repeat all the steps as mentioned in [Single Repo Application](#single-repo-application). However, ensure that the clone directory paths are unique for each repo. - -Repeat the process for every new git repository you add. The clone directory path is used by Devtron to assign a directory to each of your Git repositories. Devtron will clone your code at those locations and those paths can be referenced in the Docker file to create a Docker image of the application. - -Whenever a change is pushed to any of the configured repositories, CI will be triggered and a new Docker image file will be built (based on the latest commits of the configured repositories). Next, the image will be pushed to the container registry you configured in Devtron. - -{% hint style="info" %} -Even if you add multiple repositories, only one image will be created based on the Dockerfile as shown in the [docker build config](docker-build-configuration.md) -{% endhint %} +Let’s look at this with an example: -### Why do you need Multi-Git support? 
+Due to security reasons, you may want to keep sensitive configurations like third party API keys in a separate access restricted git repositories and the source code in a git repository that every developer has access to. To deploy this application, code from both the repositories is required. A multi-git support will help you to do that. -Let’s look at this with an example: +Few other examples, where you may want to have multiple repositories for your application and will need multi git checkout support: -Due to security reasons, you want to keep sensitive configurations like third-party API keys in separate access-restricted git repositories, and the source code in a Git repository that every developer has access to. To deploy this application, code from both the repositories are required. A Multi-Git support helps you achieve it. +* To make code modularize, you are keeping front-end and back-end code in different repositories. +* Common Library extracted out in different repo so that it can be used via multiple other projects. +* Due to security reasons you are keeping configuration files in different access restricted git repositories. -Other examples where you might need Multi-Git support: +## **How Devtron's 'Checkout Path' Works** -* To make code modularized, where front-end and back-end code are in different repos -* Common library extracted out in a different repo so that other projects can use it \ No newline at end of file +The checkout path is used by Devtron to assign a directory to each of your git repositories. Once you provide different checkout paths for your repositories, Devtron will clone your code at those locations and these checkout paths can be referenced in the docker file to create docker image for the application. 
+Whenever a change is pushed to any the configured repositories, the CI will be triggered and a new docker image file will be built based on the latest commits of the configured repositories and pushed to the container registry. \ No newline at end of file diff --git a/docs/user-guide/deploying-application/triggering-ci.md b/docs/user-guide/deploying-application/triggering-ci.md index fe6ae8f267..3a0909fc8c 100644 --- a/docs/user-guide/deploying-application/triggering-ci.md +++ b/docs/user-guide/deploying-application/triggering-ci.md @@ -10,21 +10,6 @@ Once clicked, a list will appear showing various commits made in the repository, CI Pipelines with automatic trigger enabled are triggered immediately when a new commit is made to the git branch. If the trigger for a build pipeline is set to manual, it will not be automatically triggered and requires a manual trigger. -{% hint style="info" %} - -### Partal Cloning Feature [![](https://img.shields.io/badge/ENT-Devtron-blue)](https://devtron.ai/pricing) - -CI builds can be time-consuming for large repositories, especially for enterprises. However, Devtron's partial cloning feature significantly increases cloning speed, reducing the time it takes to clone your source code and leading to faster build times. - -**Advantages** -* Smaller image sizes -* Reduced resource usage and costs -* Faster software releases -* Improved productivity - -Get in touch with us if you are looking for a way to improve the efficiency of your software development process -{% endhint %} - The **Refresh** icon updates the Git Commits section in the CI Pipeline by fetching the latest commits from the repository. Clicking on the refresh icon ensures that you have the most recent commit available. The **Ignore Cache** option ignores the previous build cache and creates a fresh build. If selected, will take a longer build time than usual. @@ -47,5 +32,3 @@ To check for any vulnerabilities in the build image, click on `Security`. 
Please ![](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/deploying-application/triggering-ci/security-scan-report.jpg) - - diff --git a/docs/user-guide/integrations/build-and-deploy-ci-cd.md b/docs/user-guide/integrations/build-and-deploy-ci-cd.md index 6347adf5a8..c1d8977205 100644 --- a/docs/user-guide/integrations/build-and-deploy-ci-cd.md +++ b/docs/user-guide/integrations/build-and-deploy-ci-cd.md @@ -14,10 +14,6 @@ Devtron CI/CD integration enables software development teams to automate the bui * Provides deployment metrics like; deployment frequency, lead time, change failure rate, and mean-time recovery. * Seamless integration with Grafana for continuous application metrics like CPU and memory usage, status code, throughput, and latency on the dashboard. -{% hint style="info" %} -Devtron also gives you the option of partial cloning. It increases the cloning speed of your [code repository](../../../docs/reference/glossary#repo), thus reducing the [build time](../../../docs/reference/glossary#build-pipeline) during the [CI process](../deploying-application/triggering-ci). -{% endhint %} - ## Installation 1. On the **Devtron Stack Manager > Discover** page, click the **Build and Deploy (CI/CD)**. diff --git a/docs/user-guide/use-cases/oci-pull.md b/docs/user-guide/use-cases/oci-pull.md deleted file mode 100644 index 858eabdaf7..0000000000 --- a/docs/user-guide/use-cases/oci-pull.md +++ /dev/null @@ -1,73 +0,0 @@ -# Pull Helm Charts from OCI Registry - -## Introduction - -Devtron supports the installation of [Helm charts](https://docs.devtron.ai/resources/glossary#helm-charts-packages) from both: Helm [repos](https://docs.devtron.ai/resources/glossary#repo) and [Container/OCI registries](https://docs.devtron.ai/resources/glossary#container-registry). Unlike Helm repos, OCI registries do not have an index file to discover all the charts. 
However, Devtron makes it easier for you to populate your charts from multiple sources to the [chart store](https://docs.devtron.ai/resources/glossary#chart-store). - -**Pre-requisites** - -* Helm Chart(s) -* OCI-compliant Registry (e.g. Docker Hub and [many more](https://docs.devtron.ai/global-configurations/container-registries#supported-registry-providers)) - -You must [add your OCI registry](https://docs.devtron.ai/global-configurations/container-registries) to Devtron with the `Use as chart repository` option enabled. - ---- - -## Tutorial - -{% embed url="https://www.youtube.com/watch?v=9imC5MMz9gs" caption="Pulling Charts from an OCI Registry to Devtron" %} - ---- - -## Populating your Charts to the Chart Store - -1. Go to **Global Configurations** → **Container/OCI Registry**. - -2. Search your OCI registry in the list, and click it. - -3. In the **List of repositories**, add the chart repo(s). The format should be `username/chartname`. You can find the username from your registry provider account. - - ![Figure 1: Adding Chart Repos](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/use-cases/oci-pull/container-config.jpg) - -4. Click **Save** or **Update**. - -5. From the left sidebar, go to **Chart Store**. - -6. You can find your chart(s) either by using the search bar or by selecting your chart source. - - ![Figure 2: Searching your Chart](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/use-cases/oci-pull/chart-search.jpg) - -You have successfully pulled your charts to the chart store. - -![Figure 3: Uploaded Helm Charts](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/use-cases/oci-pull/chart-list.jpg) - -### Unable to find your Charts? 
- -Deprecated charts won't show up in the Chart Store unless you enable the **Show deprecated charts** filter as shown below - -![Figure 4: Checking Deprecated Charts](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/use-cases/oci-pull/deprecated.jpg) - -Or, you may try performing a resync as shown below - -![Figure 5: Performing a Resync](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/use-cases/oci-pull/chart-sync.jpg) - ---- - - -## Removing your Chart from the Chart Store - -1. Go to your OCI registry settings in Devtron. - -2. In the **List of repositories** field, remove the unwanted chart repo. - - ![Figure 6: Removing a Chart Repo](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/use-cases/oci-pull/remove-chart-repo.jpg) - -3. Click **Update**. - -The removed chart would no longer appear in the Chart Store. - -{% hint style="info" %} -Deleting a chart repo from your OCI registry will not lead to the removal of chart from the Chart Store -{% endhint %} - - diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index 171cd872d4..083be267d0 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -73,7 +73,7 @@ type CiWorkflow struct { EnvironmentId int `sql:"environment_id"` ImagePathReservationId int `sql:"image_path_reservation_id"` ReferenceCiWorkflowId int `sql:"ref_ci_workflow_id"` - ParentCiWorkFlowId int `sql:"parent_ci_workflow_id"` + ParentCiWorkFlowId int `sql:"parent_ci_workflow_id"` CiPipeline *CiPipeline } diff --git a/internal/sql/repository/security/ImageScanDeployInfoRepository.go b/internal/sql/repository/security/ImageScanDeployInfoRepository.go index cea243d14b..6f421e774f 100644 --- a/internal/sql/repository/security/ImageScanDeployInfoRepository.go +++ b/internal/sql/repository/security/ImageScanDeployInfoRepository.go @@ 
-145,13 +145,13 @@ func (impl ImageScanDeployInfoRepositoryImpl) FetchListingGroupByObject(size int } func (impl ImageScanDeployInfoRepositoryImpl) FetchByAppIdAndEnvId(appId int, envId int, objectType []string) (*ImageScanDeployInfo, error) { - var model *ImageScanDeployInfo - err := impl.dbConnection.Model(model). + var model ImageScanDeployInfo + err := impl.dbConnection.Model(&model). Where("scan_object_meta_id = ?", appId). Where("env_id = ?", envId).Where("object_type in (?)", pg.In(objectType)). Order("created_on desc").Limit(1). Select() - return model, err + return &model, err } func (impl ImageScanDeployInfoRepositoryImpl) FindByTypeMetaAndTypeId(scanObjectMetaId int, objectType string) (*ImageScanDeployInfo, error) { @@ -185,8 +185,7 @@ func (impl ImageScanDeployInfoRepositoryImpl) scanListQueryWithoutObject(request } query = query + " INNER JOIN environment env on env.id=info.env_id" query = query + " INNER JOIN cluster clus on clus.id=env.cluster_id" - query = query + " LEFT JOIN app ap on ap.id = info.scan_object_meta_id and info.object_type='app' WHERE ap.active=true" - query = query + " AND info.scan_object_meta_id > 0 and env.active=true and info.image_scan_execution_history_id[1] != -1 " + query = query + " WHERE info.scan_object_meta_id > 0 and env.active=true and info.image_scan_execution_history_id[1] != -1" if len(deployInfoIds) > 0 { ids := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(deployInfoIds)), ","), "[]") query = query + " AND info.id IN (" + ids + ")" diff --git a/internal/util/MergeUtil.go b/internal/util/MergeUtil.go index 3f252bb9d1..df6ca22fd4 100644 --- a/internal/util/MergeUtil.go +++ b/internal/util/MergeUtil.go @@ -19,11 +19,11 @@ package util import ( "encoding/json" + "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/util" jsonpatch "github.com/evanphx/json-patch" "go.uber.org/zap" - "golang.org/x/exp/slices" ) type MergeUtil struct { @@ 
-99,6 +99,8 @@ func (m MergeUtil) ConfigMapMerge(appLevelConfigMapJson string, envLevelConfigMa appLevelConfigMap := bean.ConfigMapJson{} envLevelConfigMap := bean.ConfigMapJson{} configResponse := bean.ConfigMapJson{} + commonMaps := map[string]bean.ConfigSecretMap{} + var finalMaps []bean.ConfigSecretMap if appLevelConfigMapJson != "" { err = json.Unmarshal([]byte(appLevelConfigMapJson), &appLevelConfigMap) if err != nil { @@ -115,7 +117,20 @@ func (m MergeUtil) ConfigMapMerge(appLevelConfigMapJson string, envLevelConfigMa configResponse.Enabled = true } - configResponse.Maps = mergeConfigMapsAndSecrets(envLevelConfigMap.Maps, appLevelConfigMap.Maps) + for _, item := range envLevelConfigMap.Maps { + commonMaps[item.Name] = item + } + for _, item := range appLevelConfigMap.Maps { + if _, ok := commonMaps[item.Name]; ok { + //ignoring this value as override from configB + } else { + commonMaps[item.Name] = item + } + } + for _, v := range commonMaps { + finalMaps = append(finalMaps, v) + } + configResponse.Maps = finalMaps byteData, err := json.Marshal(configResponse) if err != nil { m.Logger.Debugw("error in marshal ", "err", err) @@ -125,75 +140,67 @@ func (m MergeUtil) ConfigMapMerge(appLevelConfigMapJson string, envLevelConfigMa func (m MergeUtil) ConfigSecretMerge(appLevelSecretJson string, envLevelSecretJson string, chartMajorVersion int, chartMinorVersion int, isJob bool) (data string, err error) { appLevelSecret := bean.ConfigSecretJson{} + envLevelSecret := bean.ConfigSecretJson{} + secretResponse := bean.ConfigSecretJson{} + commonSecrets := map[string]*bean.ConfigSecretMap{} + var finalMaps []*bean.ConfigSecretMap if appLevelSecretJson != "" { err = json.Unmarshal([]byte(appLevelSecretJson), &appLevelSecret) if err != nil { m.Logger.Debugw("error in Unmarshal ", "appLevelSecretJson", appLevelSecretJson, "envLevelSecretJson", envLevelSecretJson, "err", err) } } - envLevelSecret := bean.ConfigSecretJson{} if envLevelSecretJson != "" { err = 
json.Unmarshal([]byte(envLevelSecretJson), &envLevelSecret) if err != nil { m.Logger.Debugw("error in Unmarshal ", "appLevelSecretJson", appLevelSecretJson, "envLevelSecretJson", envLevelSecretJson, "err", err) } } - secretResponse := bean.ConfigSecretJson{} if len(appLevelSecret.Secrets) > 0 || len(envLevelSecret.Secrets) > 0 { secretResponse.Enabled = true } - finalCMCS := mergeConfigMapsAndSecrets(envLevelSecret.GetDereferencedSecrets(), appLevelSecret.GetDereferencedSecrets()) - for _, finalMap := range finalCMCS { - finalMap = m.processExternalSecrets(finalMap, chartMajorVersion, chartMinorVersion, isJob) + for _, item := range envLevelSecret.Secrets { + commonSecrets[item.Name] = item } - secretResponse.SetReferencedSecrets(finalCMCS) - byteData, err := json.Marshal(secretResponse) - if err != nil { - m.Logger.Debugw("error in marshal ", "err", err) - } - return string(byteData), err -} - -func mergeConfigMapsAndSecrets(envLevelCMCS []bean.ConfigSecretMap, appLevelSecretCMCS []bean.ConfigSecretMap) []bean.ConfigSecretMap { - envCMCSNames := make([]string, 0) - var finalCMCS []bean.ConfigSecretMap - for _, item := range envLevelCMCS { - envCMCSNames = append(envCMCSNames, item.Name) - } - for _, item := range appLevelSecretCMCS { + for _, item := range appLevelSecret.Secrets { //else ignoring this value as override from configB - if !slices.Contains(envCMCSNames, item.Name) { - finalCMCS = append(finalCMCS, item) + if _, ok := commonSecrets[item.Name]; !ok { + commonSecrets[item.Name] = item } } - for _, item := range envLevelCMCS { - finalCMCS = append(finalCMCS, item) - } - return finalCMCS -} -func (m MergeUtil) processExternalSecrets(secret bean.ConfigSecretMap, chartMajorVersion int, chartMinorVersion int, isJob bool) bean.ConfigSecretMap { - if secret.ExternalType == util.AWSSecretsManager || secret.ExternalType == util.AWSSystemManager || secret.ExternalType == util.HashiCorpVault { - if secret.SecretData != nil && ((chartMajorVersion <= 3 && 
chartMinorVersion < 8) || isJob) { - var es []map[string]interface{} - esNew := make(map[string]interface{}) - err := json.Unmarshal(secret.SecretData, &es) - if err != nil { - m.Logger.Debugw("error in Unmarshal ", "SecretData", secret.SecretData, "external secret", es, "err", err) + for _, item := range commonSecrets { + if item.ExternalType == util.AWSSecretsManager || item.ExternalType == util.AWSSystemManager || item.ExternalType == util.HashiCorpVault { + if item.SecretData != nil && ((chartMajorVersion <= 3 && chartMinorVersion < 8) || isJob) { + var es []map[string]interface{} + esNew := make(map[string]interface{}) + err = json.Unmarshal(item.SecretData, &es) + if err != nil { + m.Logger.Debugw("error in Unmarshal ", "appLevelSecretJson", appLevelSecretJson, "envLevelSecretJson", envLevelSecretJson, "err", err) + } + for _, item := range es { + keyProp := item["name"].(string) + valueProp := item["key"] + esNew[keyProp] = valueProp + } + byteData, err := json.Marshal(esNew) + if err != nil { + m.Logger.Debugw("error in marshal ", "err", err) + } + item.Data = byteData + item.SecretData = nil } - for _, item := range es { - keyProp := item["name"].(string) - valueProp := item["key"] - esNew[keyProp] = valueProp - } - byteData, err := json.Marshal(esNew) - if err != nil { - m.Logger.Debugw("error in marshal ", "err", err) - } - secret.Data = byteData - secret.SecretData = nil } } - return secret + + for _, v := range commonSecrets { + finalMaps = append(finalMaps, v) + } + secretResponse.Secrets = finalMaps + byteData, err := json.Marshal(secretResponse) + if err != nil { + m.Logger.Debugw("error in marshal ", "err", err) + } + return string(byteData), err } diff --git a/manifests/install/devtron-installer.yaml b/manifests/install/devtron-installer.yaml index 101cec01c3..a035d78329 100644 --- a/manifests/install/devtron-installer.yaml +++ b/manifests/install/devtron-installer.yaml @@ -4,4 +4,4 @@ metadata: name: installer-devtron namespace: devtroncd spec: - 
url: https://raw.githubusercontent.com/devtron-labs/devtron/v0.6.23/manifests/installation-script + url: https://raw.githubusercontent.com/devtron-labs/devtron/v0.6.22/manifests/installation-script diff --git a/manifests/installation-script b/manifests/installation-script index 1c30ab92be..3d9964e8f3 100644 --- a/manifests/installation-script +++ b/manifests/installation-script @@ -1,4 +1,4 @@ -LTAG="v0.6.23"; +LTAG="v0.6.22"; REPO_RAW_URL="https://raw.githubusercontent.com/devtron-labs/devtron/"; operatorSecret = kubectl get secret -n devtroncd devtron-operator-secret; @@ -60,7 +60,11 @@ if !defaultCacheBucket { ######Generating raw urls argocdResource_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/argocd-resource.json"; devtronHousekeeping_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/devtron-housekeeping.yaml"; +dashboard_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/dashboard.yaml"; +gitSensor_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/gitsensor.yaml"; +kubelink_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/kubelink.yaml"; kubewatch_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/kubewatch.yaml"; +lens_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/lens.yaml"; natsServer_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/nats-server.yaml"; devtron_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/devtron.yaml"; devtronIngress_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/devtron-ingress.yaml"; @@ -70,33 +74,49 @@ devtronIngress_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/devtron-ingress-leg log(devtronIngress_raw); serviceAccount_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/serviceaccount.yaml"; namespace_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/namespace.yaml"; +rollout_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/rollout.yaml"; ######Downloading the manifests argocdResource = download(argocdResource_raw); devtronHousekeeping = download(devtronHousekeeping_raw); +dashboard = download(dashboard_raw); +gitSensor = download(gitSensor_raw); +kubelink = 
download(kubelink_raw); kubewatch = download(kubewatch_raw); +lens = download(lens_raw); natsServer = download(natsServer_raw); devtron = download(devtron_raw); devtronIngress = download(devtronIngress_raw); serviceAccount = download(serviceAccount_raw); namespace = download(namespace_raw); +rollout = download(rollout_raw); ######Downloading the manifests devtronHousekeepingOverride = kubectl get cm -n devtroncd devtron-housekeeping-override-cm; +dashboardOverride = kubectl get cm -n devtroncd dashboard-override-cm; +gitSensorOverride = kubectl get cm -n devtroncd git-sensor-override-cm; +kubelinkOverride = kubectl get cm -n devtroncd kubelink-override-cm; kubewatchOverride = kubectl get cm -n devtroncd kubewatch-override-cm; +lensOverride = kubectl get cm -n devtroncd lens-override-cm; natsServerOverride = kubectl get cm -n devtroncd nats-server-override-cm; devtronOverride = kubectl get cm -n devtroncd devtron-override-cm; devtronIngressOverride = kubectl get cm -n devtroncd devtron-ingress-override-cm; serviceAccountOverride = kubectl get cm -n devtroncd devtron-service-account-override-cm; namespaceOverride = kubectl get cm -n devtroncd namespace-override-cm; +rolloutOverride = kubectl get cm -n devtroncd rollout-override-cm; +dashboardOverride = jsonSelect(dashboardOverride, "data.override"); +gitSensorOverride = jsonSelect(gitSensorOverride, "data.override"); +kubelinkOverride = jsonSelect(kubelinkOverride, "data.override"); kubewatchOverride = jsonSelect(kubewatchOverride, "data.override"); +lensOverride = jsonSelect(lensOverride, "data.override"); natsServerOverride = jsonSelect(natsServerOverride, "data.override"); devtronOverride = jsonSelect(devtronOverride, "data.override"); devtronIngressOverride = jsonSelect(devtronIngressOverride, "data.override"); serviceAccountOverride = jsonSelect(serviceAccountOverride, "data.override"); namespaceOverride = jsonSelect(namespaceOverride, "data.override"); +rolloutOverride = jsonSelect(rolloutOverride, 
"data.override"); namespaces = kubectl apply namespace; log("created namespaces"); @@ -106,6 +126,15 @@ log("created service account"); pa = kubectl patch -n devtroncd cm/argocd-cm --type "application/json-patch+json" -p argocdResource; log("executed argocd setup command"); +#rollout +rollout = kubectl apply -n devtroncd rollout -u rolloutOverride; +log("executed rollout setup command"); + +#git-sensor +kubeYamlEdit(gitSensor, "data.PG_PASSWORD", postgresqlPassword, `/Secret//git-sensor-secret`); + +#lens +kubeYamlEdit(lens, "data.PG_PASSWORD", postgresqlPassword, `/Secret//lens-secret`); migDelete = kubectl delete -n devtroncd job devtron-housekeeping; if !migDelete { @@ -201,8 +230,50 @@ if !helmInstallation { devtron = kubectl apply -n devtroncd devtron -u devtronOverride; log("executed devtron setup"); +if !helmInstallation { + if devtronIngressAnnotations { + log("editing ingress"); + kubeYamlEdit(devtronIngress, "metadata.annotations", devtronIngressAnnotations, `extensions/Ingress//devtron-ingress`, "asObject"); + } + + if setupDevtronIngress { + log("fetch ingress"); + existingIngress = kubectl get -n devtroncd ing devtron-ingress; + } + + if existingIngress { + annotations = jsonSelect(existingIngress, "metadata.annotations"); + } + + if annotations { + kubeYamlEdit(devtronIngress, "metadata.annotations", annotations, `extensions/Ingress//devtron-ingress`, "asObject"); + } + + if setupDevtronIngress { + log("setup ingress"); + log(devtronIngress); + devtronIngress = kubectl apply -n devtroncd devtronIngress -u devtronIngressOverride; + } + + log("executed devtron ingress setup"); +} + +if !helmInstallation { + dashboard = kubectl apply -n devtroncd dashboard -u dashboardOverride; + log("executed dashboard setup"); +} +gitSensor = kubectl apply -n devtroncd gitSensor -u gitSensorOverride; +log("executed git sensor setup"); +##imageScanner = kubectl apply -n devtroncd imageScanner -u imageScannerOverride; +log("executed image scanner setup"); +if 
!helmInstallation { + kubelink = kubectl apply -n devtroncd kubelink -u kubelinkOverride; + log("executed kubelink setup"); +} kubewatch = kubectl apply -n devtroncd kubewatch -u kubewatchOverride; log("executed kubewatch setup"); +lens = kubectl apply -n devtroncd lens -u lensOverride; +log("executed lens setup"); ## Applying Housekeeping Job appHousekeeping = kubectl apply -n devtroncd devtronHousekeeping -u devtronHousekeepingOverride; diff --git a/manifests/release.txt b/manifests/release.txt index 9543b3f3f9..5b8cfb00ed 100644 --- a/manifests/release.txt +++ b/manifests/release.txt @@ -1 +1 @@ -stable -1 v0.6.23 +stable -1 v0.6.22 diff --git a/manifests/version.txt b/manifests/version.txt index d44996fff6..635026fb80 100644 --- a/manifests/version.txt +++ b/manifests/version.txt @@ -1 +1 @@ -v0.6.23 +v0.6.22 diff --git a/manifests/yamls/dashboard.yaml b/manifests/yamls/dashboard.yaml index 4113536571..585154c84f 100644 --- a/manifests/yamls/dashboard.yaml +++ b/manifests/yamls/dashboard.yaml @@ -235,7 +235,7 @@ spec: - name: envoy-config-volume mountPath: /etc/envoy-config/ - name: dashboard - image: "quay.io/devtron/dashboard:ba04f4f4-325-18824" + image: "quay.io/devtron/dashboard:12717798-325-16265" imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false diff --git a/manifests/yamls/devtron.yaml b/manifests/yamls/devtron.yaml index efc37b5e90..dbb58c74e8 100644 --- a/manifests/yamls/devtron.yaml +++ b/manifests/yamls/devtron.yaml @@ -53,7 +53,7 @@ data: CD_NODE_TAINTS_VALUE: "ci" CD_ARTIFACT_LOCATION_FORMAT: "%d/%d.zip" DEFAULT_CD_NAMESPACE: "devtron-cd" - DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:ad3af321-138-18662" + DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:d8d774c3-138-16238" DEFAULT_CD_TIMEOUT: "3600" WF_CONTROLLER_INSTANCE_ID: "devtron-runner" CI_LOGS_KEY_PREFIX: "ci-artifacts" @@ -94,13 +94,6 @@ data: GIT_SENSOR_PROTOCOL: GRPC GIT_SENSOR_URL: git-sensor-service.devtroncd:90 ENABLE_BUILD_CONTEXT: "true" - 
CI_SUCCESS_AUTO_TRIGGER_BATCH_SIZE: "1" - SKIP_GITOPS_VALIDATION: "false" - SKIP_CREATING_ECR_REPO: "false" - SCOPED_VARIABLE_ENABLED: "true" - SCOPED_VARIABLE_HANDLE_PRIMITIVES: "true" - MAX_CI_WORKFLOW_RETRIES: "0" - MAX_CD_WORKFLOW_RUNNER_RETRIES: "0" --- apiVersion: v1 kind: ConfigMap @@ -169,7 +162,7 @@ spec: runAsUser: 1000 containers: - name: devtron - image: "quay.io/devtron/devtron:50ac85e6-434-18829" + image: "quay.io/devtron/devtron:3c1ba1ad-434-16260" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/gitsensor.yaml b/manifests/yamls/gitsensor.yaml index b274a4bb12..69d499c96f 100644 --- a/manifests/yamls/gitsensor.yaml +++ b/manifests/yamls/gitsensor.yaml @@ -67,7 +67,7 @@ spec: - /bin/sh - -c - mkdir -p /git-base/ssh-keys && chown -R devtron:devtron /git-base && chmod 777 /git-base/ssh-keys - image: "quay.io/devtron/git-sensor:b6c3ea0e-200-16327" + image: "quay.io/devtron/git-sensor:46b8f0f1-200-16195" imagePullPolicy: IfNotPresent name: chown-git-base resources: {} @@ -80,7 +80,7 @@ spec: name: git-volume containers: - name: git-sensor - image: "quay.io/devtron/git-sensor:b6c3ea0e-200-16327" + image: "quay.io/devtron/git-sensor:46b8f0f1-200-16195" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/kubelink.yaml b/manifests/yamls/kubelink.yaml index b8eb486fe1..0db8070821 100644 --- a/manifests/yamls/kubelink.yaml +++ b/manifests/yamls/kubelink.yaml @@ -25,7 +25,7 @@ spec: runAsUser: 1000 containers: - name: kubelink - image: "quay.io/devtron/kubelink:25052130-318-18795" + image: "quay.io/devtron/kubelink:aefc1baf-318-16208" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/kubewatch.yaml b/manifests/yamls/kubewatch.yaml index 65db0c5d12..96a2c34f0e 100644 --- a/manifests/yamls/kubewatch.yaml +++ b/manifests/yamls/kubewatch.yaml @@ -164,7 +164,7 @@ spec: runAsUser: 1000 containers: - name: kubewatch - image: 
"quay.io/devtron/kubewatch:79d44ddc-370-18559" + image: "quay.io/devtron/kubewatch:49f906a5-419-14814" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/migrator.yaml b/manifests/yamls/migrator.yaml index c9971d3008..009c28505a 100644 --- a/manifests/yamls/migrator.yaml +++ b/manifests/yamls/migrator.yaml @@ -47,7 +47,7 @@ spec: - name: MIGRATE_TO_VERSION value: "0" - name: GIT_HASH - value: 50ac85e68d6e020797b0db342527c79a89c9c969 + value: 3c1ba1ad06cf134743c08667e8589dbd2f97c57d envFrom: - secretRef: name: postgresql-migrator @@ -96,7 +96,7 @@ spec: - name: MIGRATE_TO_VERSION value: "0" - name: GIT_HASH - value: 50ac85e68d6e020797b0db342527c79a89c9c969 + value: 3c1ba1ad06cf134743c08667e8589dbd2f97c57d - name: GIT_BRANCH value: main envFrom: @@ -148,7 +148,7 @@ spec: - name: GIT_BRANCH value: main - name: GIT_HASH - value: b6c3ea0ef2d3dff004b572916ff804914b8d938a + value: 46b8f0f18a3402234663ba963496e2b8ced271ae envFrom: - secretRef: name: postgresql-migrator diff --git a/manifests/yamls/notifier.yaml b/manifests/yamls/notifier.yaml index 5c7cd4f2a9..8424138a09 100644 --- a/manifests/yamls/notifier.yaml +++ b/manifests/yamls/notifier.yaml @@ -66,7 +66,7 @@ spec: restartPolicy: Always containers: - name: notifier - image: quay.io/devtron/notifier:d71bcbcd-372-18717 + image: quay.io/devtron/notifier:d9c72180-372-14306 imagePullPolicy: IfNotPresent ports: - name: app diff --git a/manifests/yamls/serviceaccount.yaml b/manifests/yamls/serviceaccount.yaml index b29127e812..6b9bee776f 100644 --- a/manifests/yamls/serviceaccount.yaml +++ b/manifests/yamls/serviceaccount.yaml @@ -158,6 +158,15 @@ rules: - update - patch - delete +- apiGroups: + - argoproj.io + resources: + - workflowtemplates + - workflowtemplates/finalizers + verbs: + - get + - list + - watch - apiGroups: - "" resources: @@ -170,10 +179,8 @@ rules: - "" resources: - persistentvolumeclaims - - persistentvolumeclaims/finalizers verbs: - create - - update - delete 
- get - apiGroups: @@ -181,9 +188,6 @@ rules: resources: - workflows - workflows/finalizers - - workflowtasksets - - workflowtasksets/finalizers - - workflowartifactgctasks verbs: - get - list @@ -191,27 +195,15 @@ rules: - update - patch - delete - - create - apiGroups: - argoproj.io resources: - workflowtemplates - workflowtemplates/finalizers - - clusterworkflowtemplates - - clusterworkflowtemplates/finalizers verbs: - get - list - watch -- apiGroups: - - argoproj.io - resources: - - workflowtaskresults - - workflowtaskresults/finalizers - verbs: - - list - - watch - - deletecollection - apiGroups: - "" resources: diff --git a/pkg/pipeline/CustomTagService.go b/pkg/CustomTagService.go similarity index 71% rename from pkg/pipeline/CustomTagService.go rename to pkg/CustomTagService.go index 76cfc766ac..b8e067b0d2 100644 --- a/pkg/pipeline/CustomTagService.go +++ b/pkg/CustomTagService.go @@ -1,10 +1,9 @@ -package pipeline +package pkg import ( "fmt" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository" - bean2 "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/go-pg/pg" "go.uber.org/zap" "regexp" @@ -12,6 +11,24 @@ import ( "strings" ) +const ( + EntityNull = iota + EntityTypeCiPipelineId + EntityTypePreCD + EntityTypePostCD +) + +const ( + imagePathPattern = "%s/%s:%s" // dockerReg/dockerRepo:Tag + ImageTagUnavailableMessage = "Desired image tag already exists" + REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS = `\{.{2,}\}` + REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x = `\{[^xX]|{}\}` +) + +var ( + ErrImagePathInUse = fmt.Errorf(ImageTagUnavailableMessage) +) + type CustomTagService interface { CreateOrUpdateCustomTag(tag *bean.CustomTag) error GetCustomTagByEntityKeyAndValue(entityKey int, entityValue string) (*repository.CustomTag, error) @@ -44,7 +61,7 @@ func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag 
*bean.CustomTag) e customTagData := repository.CustomTag{ EntityKey: tag.EntityKey, EntityValue: tag.EntityValue, - TagPattern: strings.ReplaceAll(tag.TagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_X, bean2.IMAGE_TAG_VARIABLE_NAME_x), + TagPattern: strings.ReplaceAll(tag.TagPattern, "{X}", "{x}"), AutoIncreasingNumber: tag.AutoIncreasingNumber, Metadata: tag.Metadata, Active: true, @@ -89,13 +106,13 @@ func (impl *CustomTagServiceImpl) GenerateImagePath(entityKey int, entityValue s if err != nil { return nil, err } - imagePath := fmt.Sprintf(bean2.ImagePathPattern, dockerRegistryURL, dockerRepo, tag) + imagePath := fmt.Sprintf(imagePathPattern, dockerRegistryURL, dockerRepo, tag) imagePathReservations, err := impl.customTagRepository.FindByImagePath(tx, imagePath) if err != nil && err != pg.ErrNoRows { return nil, err } if len(imagePathReservations) > 0 { - return nil, bean2.ErrImagePathInUse + return nil, ErrImagePathInUse } imagePathReservation := &repository.ImagePathReservation{ ImagePath: imagePath, @@ -120,9 +137,10 @@ func validateAndConstructTag(customTagData *repository.CustomTag) (string, error if customTagData.AutoIncreasingNumber < 0 { return "", fmt.Errorf("counter {x} can not be negative") } - dockerImageTag := strings.ReplaceAll(customTagData.TagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_x, strconv.Itoa(customTagData.AutoIncreasingNumber-1)) //-1 because number is already incremented, current value will be used next time - if !isValidDockerImageTag(dockerImageTag) { - return dockerImageTag, fmt.Errorf("invalid docker tag") + dockerImageTag := strings.ReplaceAll(customTagData.TagPattern, "{x}", strconv.Itoa(customTagData.AutoIncreasingNumber-1)) //-1 because number is already incremented, current value will be used next time + err = validateTag(dockerImageTag) + if err != nil { + return "", err } return dockerImageTag, nil } @@ -132,32 +150,38 @@ func validateTagPattern(customTagPattern string) error { return fmt.Errorf("tag length can not be zero") } - 
variableCount := 0 - variableCount = variableCount + strings.Count(customTagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_x) - variableCount = variableCount + strings.Count(customTagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_X) - - if variableCount == 0 { - // there can be case when there is only one {x} or {x} - return fmt.Errorf("variable with format {x} or {X} not found") - } else if variableCount > 1 { - return fmt.Errorf("only one variable with format {x} or {X} allowed") + if IsInvalidVariableFormat(customTagPattern) { + return fmt.Errorf("only one variable is allowed. Allowed variable format : {x} or {X}") } - // replacing variable with 1 (dummy value) and checking if resulting string is valid tag - tagWithDummyValue := strings.ReplaceAll(customTagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_x, "1") - tagWithDummyValue = strings.ReplaceAll(tagWithDummyValue, bean2.IMAGE_TAG_VARIABLE_NAME_X, "1") - - if !isValidDockerImageTag(tagWithDummyValue) { - return fmt.Errorf("not a valid image tag") + remainingString := strings.ReplaceAll(customTagPattern, ".{x}", "") + remainingString = strings.ReplaceAll(remainingString, ".{X}", "") + if len(remainingString) == 0 { + return nil } + n := len(remainingString) + if remainingString[0] == '.' || remainingString[0] == '-' { + return fmt.Errorf("tag can not start with an hyphen or a period") + } + if n != 0 && (remainingString[n-1] == '.' 
|| remainingString[n-1] == '-') { + return fmt.Errorf("tag can not end with an hyphen or a period") + } return nil } -func isValidDockerImageTag(tag string) bool { - // Define the regular expression for a valid Docker image tag - re := regexp.MustCompile(bean2.REGEX_PATTERN_FOR_IMAGE_TAG) - return re.MatchString(tag) +func IsInvalidVariableFormat(customTagPattern string) bool { + regex := regexp.MustCompile(REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS) + matches := regex.FindAllString(customTagPattern, -1) + if len(matches) > 0 { + return true + } + regex = regexp.MustCompile(REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x) + matches = regex.FindAllString(customTagPattern, -1) + if len(matches) > 0 { + return true + } + return false } func validateTag(imageTag string) error { diff --git a/pkg/app/AppCrudOperationService.go b/pkg/app/AppCrudOperationService.go index 9a3bb90b3d..f139b09b61 100644 --- a/pkg/app/AppCrudOperationService.go +++ b/pkg/app/AppCrudOperationService.go @@ -206,7 +206,7 @@ func (impl AppCrudOperationServiceImpl) UpdateLabelsInApp(request *bean.CreateAp appLabelMap[uniqueLabelExists] = appLabel } } - appLabelDeleteMap := make(map[string]bool, 0) + for _, label := range request.AppLabels { uniqueLabelRequest := fmt.Sprintf("%s:%s:%t", label.Key, label.Value, label.Propagate) if _, ok := appLabelMap[uniqueLabelRequest]; !ok { @@ -227,13 +227,10 @@ func (impl AppCrudOperationServiceImpl) UpdateLabelsInApp(request *bean.CreateAp return nil, err } } else { - // storing this unique so that item remain live, all other item will be delete from this app - appLabelDeleteMap[uniqueLabelRequest] = true + // delete from map so that item remain live, all other item will be delete from this app + delete(appLabelMap, uniqueLabelRequest) } } - for labelReq, _ := range appLabelDeleteMap { - delete(appLabelMap, labelReq) - } for _, appLabel := range appLabelMap { err = impl.appLabelRepository.Delete(appLabel, tx) if err != nil { diff --git 
a/pkg/app/AppService.go b/pkg/app/AppService.go index 3fc92f185b..05622bd4ee 100644 --- a/pkg/app/AppService.go +++ b/pkg/app/AppService.go @@ -20,11 +20,15 @@ package app import ( "context" "encoding/json" + error2 "errors" "fmt" "github.com/caarlos0/env" + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + k8s2 "github.com/devtron-labs/common-lib/utils/k8s" k8sCommonBean "github.com/devtron-labs/common-lib/utils/k8s/commonBean" "github.com/devtron-labs/common-lib/utils/k8s/health" client2 "github.com/devtron-labs/devtron/api/helm-app" + bean3 "github.com/devtron-labs/devtron/pkg/app/bean" status2 "github.com/devtron-labs/devtron/pkg/app/status" repository4 "github.com/devtron-labs/devtron/pkg/appStore/deployment/repository" "github.com/devtron-labs/devtron/pkg/appStore/deployment/service" @@ -34,13 +38,18 @@ import ( "github.com/devtron-labs/devtron/pkg/k8s" repository3 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" repository5 "github.com/devtron-labs/devtron/pkg/pipeline/repository" + "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/variables" "github.com/devtron-labs/devtron/pkg/variables/parsers" _ "github.com/devtron-labs/devtron/pkg/variables/repository" + repository6 "github.com/devtron-labs/devtron/pkg/variables/repository" "github.com/devtron-labs/devtron/util/argo" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" "go.opentelemetry.io/otel" "io/ioutil" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" chart2 "k8s.io/helm/pkg/proto/hapi/chart" "net/url" "os" @@ -61,10 +70,13 @@ import ( 
application2 "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/client/argocdServer/application" client "github.com/devtron-labs/devtron/client/events" + "github.com/devtron-labs/devtron/internal/middleware" + "github.com/devtron-labs/devtron/internal/sql/models" "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" @@ -76,7 +88,11 @@ import ( util "github.com/devtron-labs/devtron/util/event" "github.com/devtron-labs/devtron/util/rbac" "github.com/go-pg/pg" + errors2 "github.com/juju/errors" + "github.com/pkg/errors" "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) type AppServiceConfig struct { @@ -164,30 +180,27 @@ type AppServiceImpl struct { scopedVariableService variables.ScopedVariableService variableEntityMappingService variables.VariableEntityMappingService variableTemplateParser parsers.VariableTemplateParser - argoClientWrapperService argocdServer.ArgoClientWrapperService } type AppService interface { - //TriggerRelease(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context, triggeredAt time.Time, deployedBy int32) (releaseNo int, manifest []byte, err error) + TriggerRelease(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context, triggeredAt time.Time, deployedBy int32) (releaseNo int, manifest []byte, err error) 
UpdateReleaseStatus(request *bean.ReleaseStatusUpdateRequest) (bool, error) UpdateDeploymentStatusAndCheckIsSucceeded(app *v1alpha1.Application, statusTime time.Time, isAppStore bool) (bool, *chartConfig.PipelineOverride, error) - //TriggerCD(artifact *repository.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, triggeredAt time.Time) error + TriggerCD(artifact *repository.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, triggeredAt time.Time) error GetConfigMapAndSecretJson(appId int, envId int, pipelineId int) ([]byte, error) UpdateCdWorkflowRunnerByACDObject(app *v1alpha1.Application, cdWfrId int, updateTimedOutStatus bool) error GetCmSecretNew(appId int, envId int, isJob bool) (*bean.ConfigMapJson, *bean.ConfigSecretJson, error) - //MarkImageScanDeployed(appId int, envId int, imageDigest string, clusterId int, isScanEnabled bool) error + MarkImageScanDeployed(appId int, envId int, imageDigest string, clusterId int, isScanEnabled bool) error UpdateDeploymentStatusForGitOpsPipelines(app *v1alpha1.Application, statusTime time.Time, isAppStore bool) (bool, bool, *chartConfig.PipelineOverride, error) WriteCDSuccessEvent(appId int, envId int, override *chartConfig.PipelineOverride) GetGitOpsRepoPrefix() string - //GetValuesOverrideForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*ValuesOverrideResponse, error) - //GetEnvOverrideByTriggerType(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*chartConfig.EnvConfigOverride, error) - //GetAppMetricsByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (bool, error) - //GetDeploymentStrategyByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (*chartConfig.PipelineStrategy, error) + GetValuesOverrideForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*ValuesOverrideResponse, error) + 
GetEnvOverrideByTriggerType(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*chartConfig.EnvConfigOverride, error) + GetAppMetricsByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (bool, error) + GetDeploymentStrategyByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (*chartConfig.PipelineStrategy, error) CreateGitopsRepo(app *app.App, userId int32) (gitopsRepoName string, chartGitAttr *ChartGitAttribute, err error) GetDeployedManifestByPipelineIdAndCDWorkflowId(appId int, envId int, cdWorkflowId int, ctx context.Context) ([]byte, error) - //SetPipelineFieldsInOverrideRequest(overrideRequest *bean.ValuesOverrideRequest, pipeline *pipelineConfig.Pipeline) - - BuildChartAndGetPath(appName string, envOverride *chartConfig.EnvConfigOverride, ctx context.Context) (string, error) + SetPipelineFieldsInOverrideRequest(overrideRequest *bean.ValuesOverrideRequest, pipeline *pipelineConfig.Pipeline) } func NewAppService( @@ -248,9 +261,7 @@ func NewAppService( variableSnapshotHistoryService variables.VariableSnapshotHistoryService, scopedVariableService variables.ScopedVariableService, variableEntityMappingService variables.VariableEntityMappingService, - variableTemplateParser parsers.VariableTemplateParser, - argoClientWrapperService argocdServer.ArgoClientWrapperService, -) *AppServiceImpl { + variableTemplateParser parsers.VariableTemplateParser) *AppServiceImpl { appServiceImpl := &AppServiceImpl{ environmentConfigRepository: environmentConfigRepository, mergeUtil: mergeUtil, @@ -315,7 +326,6 @@ func NewAppService( scopedVariableService: scopedVariableService, variableEntityMappingService: variableEntityMappingService, variableTemplateParser: variableTemplateParser, - argoClientWrapperService: argoClientWrapperService, } return appServiceImpl } @@ -325,6 +335,66 @@ const ( Failure = "FAILURE" ) +func (impl *AppServiceImpl) 
SetPipelineFieldsInOverrideRequest(overrideRequest *bean.ValuesOverrideRequest, pipeline *pipelineConfig.Pipeline) { + overrideRequest.PipelineId = pipeline.Id + overrideRequest.PipelineName = pipeline.Name + overrideRequest.EnvId = pipeline.EnvironmentId + overrideRequest.EnvName = pipeline.Environment.Name + overrideRequest.ClusterId = pipeline.Environment.ClusterId + overrideRequest.AppId = pipeline.AppId + overrideRequest.AppName = pipeline.App.AppName + overrideRequest.DeploymentAppType = pipeline.DeploymentAppType +} + +func (impl *AppServiceImpl) getValuesFileForEnv(environmentId int) string { + return fmt.Sprintf("_%d-values.yaml", environmentId) //-{envId}-values.yaml +} +func (impl *AppServiceImpl) createArgoApplicationIfRequired(appId int, envConfigOverride *chartConfig.EnvConfigOverride, pipeline *pipelineConfig.Pipeline, userId int32) (string, error) { + //repo has been registered while helm create + chart, err := impl.chartRepository.FindLatestChartForAppByAppId(appId) + if err != nil { + impl.logger.Errorw("no chart found ", "app", appId) + return "", err + } + envModel, err := impl.envRepository.FindById(envConfigOverride.TargetEnvironment) + if err != nil { + return "", err + } + argoAppName := pipeline.DeploymentAppName + if pipeline.DeploymentAppCreated { + return argoAppName, nil + } else { + //create + appNamespace := envConfigOverride.Namespace + if appNamespace == "" { + appNamespace = "default" + } + namespace := argocdServer.DevtronInstalationNs + appRequest := &argocdServer.AppTemplate{ + ApplicationName: argoAppName, + Namespace: namespace, + TargetNamespace: appNamespace, + TargetServer: envModel.Cluster.ServerUrl, + Project: "default", + ValuesFile: impl.getValuesFileForEnv(envModel.Id), + RepoPath: chart.ChartLocation, + RepoUrl: chart.GitRepoUrl, + } + + argoAppName, err := impl.ArgoK8sClient.CreateAcdApp(appRequest, envModel.Cluster) + if err != nil { + return "", err + } + //update cd pipeline to mark deployment app created + _, err 
= impl.updatePipeline(pipeline, userId) + if err != nil { + impl.logger.Errorw("error in update cd pipeline for deployment app created or not", "err", err) + return "", err + } + return argoAppName, nil + } +} + func (impl *AppServiceImpl) UpdateReleaseStatus(updateStatusRequest *bean.ReleaseStatusUpdateRequest) (bool, error) { count, err := impl.pipelineOverrideRepository.UpdateStatusByRequestIdentifier(updateStatusRequest.RequestId, updateStatusRequest.NewStatus) if err != nil { @@ -943,6 +1013,84 @@ type ValuesOverrideResponse struct { AppMetrics bool } +type EnvironmentOverride struct { + Enabled bool `json:"enabled"` + EnvValues []*KeyValue `json:"envValues"` +} + +type KeyValue struct { + Key string `json:"key"` + Value string `json:"value"` +} + +func (conf *EnvironmentOverride) appendEnvironmentVariable(key, value string) { + item := &KeyValue{Key: key, Value: value} + conf.EnvValues = append(conf.EnvValues, item) +} + +func (impl *AppServiceImpl) TriggerCD(artifact *repository.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, triggeredAt time.Time) error { + impl.logger.Debugw("automatic pipeline trigger attempt async", "artifactId", artifact.Id) + + return impl.triggerReleaseAsync(artifact, cdWorkflowId, wfrId, pipeline, triggeredAt) +} + +func (impl *AppServiceImpl) triggerReleaseAsync(artifact *repository.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, triggeredAt time.Time) error { + err := impl.validateAndTrigger(pipeline, artifact, cdWorkflowId, wfrId, triggeredAt) + if err != nil { + impl.logger.Errorw("error in trigger for pipeline", "pipelineId", strconv.Itoa(pipeline.Id)) + } + impl.logger.Debugw("trigger attempted for all pipeline ", "artifactId", artifact.Id) + return err +} + +func (impl *AppServiceImpl) validateAndTrigger(p *pipelineConfig.Pipeline, artifact *repository.CiArtifact, cdWorkflowId, wfrId int, triggeredAt time.Time) error { + object := impl.enforcerUtil.GetAppRBACNameByAppId(p.AppId) 
+ envApp := strings.Split(object, "/") + if len(envApp) != 2 { + impl.logger.Error("invalid req, app and env not found from rbac") + return errors.New("invalid req, app and env not found from rbac") + } + err := impl.releasePipeline(p, artifact, cdWorkflowId, wfrId, triggeredAt) + return err +} + +func (impl *AppServiceImpl) releasePipeline(pipeline *pipelineConfig.Pipeline, artifact *repository.CiArtifact, cdWorkflowId, wfrId int, triggeredAt time.Time) error { + impl.logger.Debugw("triggering release for ", "cdPipelineId", pipeline.Id, "artifactId", artifact.Id) + + pipeline, err := impl.pipelineRepository.FindById(pipeline.Id) + if err != nil { + impl.logger.Errorw("error in fetching pipeline by pipelineId", "err", err) + return err + } + + request := &bean.ValuesOverrideRequest{ + PipelineId: pipeline.Id, + UserId: artifact.CreatedBy, + CiArtifactId: artifact.Id, + AppId: pipeline.AppId, + CdWorkflowId: cdWorkflowId, + ForceTrigger: true, + DeploymentWithConfig: bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED, + WfrId: wfrId, + } + impl.SetPipelineFieldsInOverrideRequest(request, pipeline) + + ctx, err := impl.buildACDContext() + if err != nil { + impl.logger.Errorw("error in creating acd synch context", "pipelineId", pipeline.Id, "artifactId", artifact.Id, "err", err) + return err + } + //setting deployedBy as 1(system user) since case of auto trigger + id, _, err := impl.TriggerRelease(request, ctx, triggeredAt, 1) + if err != nil { + impl.logger.Errorw("error in auto cd pipeline trigger", "pipelineId", pipeline.Id, "artifactId", artifact.Id, "err", err) + } else { + impl.logger.Infow("pipeline successfully triggered ", "cdPipelineId", pipeline.Id, "artifactId", artifact.Id, "releaseId", id) + } + return err + +} + func (impl *AppServiceImpl) buildACDContext() (acdContext context.Context, err error) { //this method should only call in case of argo-integration and gitops configured acdToken, err := impl.argoUserService.GetLatestDevtronArgoCdUserToken() @@ -955,6 
+1103,515 @@ func (impl *AppServiceImpl) buildACDContext() (acdContext context.Context, err e return ctx, nil } +func (impl *AppServiceImpl) getDbMigrationOverride(overrideRequest *bean.ValuesOverrideRequest, artifact *repository.CiArtifact, isRollback bool) (overrideJson []byte, err error) { + if isRollback { + return nil, fmt.Errorf("rollback not supported ye") + } + notConfigured := false + config, err := impl.dbMigrationConfigRepository.FindByPipelineId(overrideRequest.PipelineId) + if err != nil && !IsErrNoRows(err) { + impl.logger.Errorw("error in fetching pipeline override config", "req", overrideRequest, "err", err) + return nil, err + } else if IsErrNoRows(err) { + notConfigured = true + } + envVal := &EnvironmentOverride{} + if notConfigured { + impl.logger.Warnw("no active db migration found", "pipeline", overrideRequest.PipelineId) + envVal.Enabled = false + } else { + materialInfos, err := artifact.ParseMaterialInfo() + if err != nil { + return nil, err + } + + hash, ok := materialInfos[config.GitMaterial.Url] + if !ok { + impl.logger.Errorf("wrong url map ", "map", materialInfos, "url", config.GitMaterial.Url) + return nil, fmt.Errorf("configured url not found in material %s", config.GitMaterial.Url) + } + + envVal.Enabled = true + if config.GitMaterial.GitProvider.AuthMode != repository.AUTH_MODE_USERNAME_PASSWORD && + config.GitMaterial.GitProvider.AuthMode != repository.AUTH_MODE_ACCESS_TOKEN && + config.GitMaterial.GitProvider.AuthMode != repository.AUTH_MODE_ANONYMOUS { + return nil, fmt.Errorf("auth mode %s not supported for migration", config.GitMaterial.GitProvider.AuthMode) + } + envVal.appendEnvironmentVariable("GIT_REPO_URL", config.GitMaterial.Url) + envVal.appendEnvironmentVariable("GIT_USER", config.GitMaterial.GitProvider.UserName) + var password string + if config.GitMaterial.GitProvider.AuthMode == repository.AUTH_MODE_USERNAME_PASSWORD { + password = config.GitMaterial.GitProvider.Password + } else { + password = 
config.GitMaterial.GitProvider.AccessToken + } + envVal.appendEnvironmentVariable("GIT_AUTH_TOKEN", password) + // parse git-tag not required + //envVal.appendEnvironmentVariable("GIT_TAG", "") + envVal.appendEnvironmentVariable("GIT_HASH", hash) + envVal.appendEnvironmentVariable("SCRIPT_LOCATION", config.ScriptSource) + envVal.appendEnvironmentVariable("DB_TYPE", string(config.DbConfig.Type)) + envVal.appendEnvironmentVariable("DB_USER_NAME", config.DbConfig.UserName) + envVal.appendEnvironmentVariable("DB_PASSWORD", config.DbConfig.Password) + envVal.appendEnvironmentVariable("DB_HOST", config.DbConfig.Host) + envVal.appendEnvironmentVariable("DB_PORT", config.DbConfig.Port) + envVal.appendEnvironmentVariable("DB_NAME", config.DbConfig.DbName) + //Will be used for rollback don't delete it + //envVal.appendEnvironmentVariable("MIGRATE_TO_VERSION", strconv.Itoa(overrideRequest.TargetDbVersion)) + } + dbMigrationConfig := map[string]interface{}{"dbMigrationConfig": envVal} + confByte, err := json.Marshal(dbMigrationConfig) + if err != nil { + return nil, err + } + return confByte, nil +} + +func (impl *AppServiceImpl) GetAppMetricsByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (bool, error) { + + var appMetrics bool + if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { + _, span := otel.Tracer("orchestrator").Start(ctx, "deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId") + deploymentTemplateHistory, err := impl.deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + span.End() + if err != nil { + impl.logger.Errorw("error in getting deployed deployment template history by pipelineId and wfrId", "err", err, "pipelineId", &overrideRequest, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + return appMetrics, err + } + appMetrics = 
deploymentTemplateHistory.IsAppMetricsEnabled + + } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { + _, span := otel.Tracer("orchestrator").Start(ctx, "appLevelMetricsRepository.FindByAppId") + appLevelMetrics, err := impl.appLevelMetricsRepository.FindByAppId(overrideRequest.AppId) + span.End() + if err != nil && !IsErrNoRows(err) { + impl.logger.Errorw("err", err) + return appMetrics, &ApiError{InternalMessage: "unable to fetch app level metrics flag"} + } + appMetrics = appLevelMetrics.AppMetrics + + _, span = otel.Tracer("orchestrator").Start(ctx, "envLevelMetricsRepository.FindByAppIdAndEnvId") + envLevelMetrics, err := impl.envLevelMetricsRepository.FindByAppIdAndEnvId(overrideRequest.AppId, overrideRequest.EnvId) + span.End() + if err != nil && !IsErrNoRows(err) { + impl.logger.Errorw("err", err) + return appMetrics, &ApiError{InternalMessage: "unable to fetch env level metrics flag"} + } + if envLevelMetrics.Id != 0 && envLevelMetrics.AppMetrics != nil { + appMetrics = *envLevelMetrics.AppMetrics + } + } + return appMetrics, nil +} + +func (impl *AppServiceImpl) GetDeploymentStrategyByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (*chartConfig.PipelineStrategy, error) { + + strategy := &chartConfig.PipelineStrategy{} + var err error + if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { + _, span := otel.Tracer("orchestrator").Start(ctx, "strategyHistoryRepository.GetHistoryByPipelineIdAndWfrId") + strategyHistory, err := impl.strategyHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + span.End() + if err != nil { + impl.logger.Errorw("error in getting deployed strategy history by pipleinId and wfrId", "err", err, "pipelineId", overrideRequest.PipelineId, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + return nil, err + } + strategy.Strategy = 
strategyHistory.Strategy + strategy.Config = strategyHistory.Config + strategy.PipelineId = overrideRequest.PipelineId + } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { + if overrideRequest.ForceTrigger { + _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.GetDefaultStrategyByPipelineId") + strategy, err = impl.pipelineConfigRepository.GetDefaultStrategyByPipelineId(overrideRequest.PipelineId) + span.End() + } else { + var deploymentTemplate chartRepoRepository.DeploymentStrategy + if overrideRequest.DeploymentTemplate == "ROLLING" { + deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_ROLLING + } else if overrideRequest.DeploymentTemplate == "BLUE-GREEN" { + deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_BLUE_GREEN + } else if overrideRequest.DeploymentTemplate == "CANARY" { + deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_CANARY + } else if overrideRequest.DeploymentTemplate == "RECREATE" { + deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_RECREATE + } + + if len(deploymentTemplate) > 0 { + _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.FindByStrategyAndPipelineId") + strategy, err = impl.pipelineConfigRepository.FindByStrategyAndPipelineId(deploymentTemplate, overrideRequest.PipelineId) + span.End() + } else { + _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.GetDefaultStrategyByPipelineId") + strategy, err = impl.pipelineConfigRepository.GetDefaultStrategyByPipelineId(overrideRequest.PipelineId) + span.End() + } + } + if err != nil && errors2.IsNotFound(err) == false { + impl.logger.Errorf("invalid state", "err", err, "req", strategy) + return nil, err + } + } + return strategy, nil +} + +func (impl *AppServiceImpl) GetEnvOverrideByTriggerType(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*chartConfig.EnvConfigOverride, error) { + + envOverride 
:= &chartConfig.EnvConfigOverride{} + + var err error + if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { + _, span := otel.Tracer("orchestrator").Start(ctx, "deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId") + deploymentTemplateHistory, err := impl.deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + //VARIABLE_SNAPSHOT_GET and resolve + + span.End() + if err != nil { + impl.logger.Errorw("error in getting deployed deployment template history by pipelineId and wfrId", "err", err, "pipelineId", &overrideRequest, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + return nil, err + } + templateName := deploymentTemplateHistory.TemplateName + templateVersion := deploymentTemplateHistory.TemplateVersion + if templateName == "Rollout Deployment" { + templateName = "" + } + //getting chart_ref by id + _, span = otel.Tracer("orchestrator").Start(ctx, "chartRefRepository.FindByVersionAndName") + chartRef, err := impl.chartRefRepository.FindByVersionAndName(templateName, templateVersion) + span.End() + if err != nil { + impl.logger.Errorw("error in getting chartRef by version and name", "err", err, "version", templateVersion, "name", templateName) + return nil, err + } + //assuming that if a chartVersion is deployed then it's envConfigOverride will be available + _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.GetByAppIdEnvIdAndChartRefId") + envOverride, err = impl.environmentConfigRepository.GetByAppIdEnvIdAndChartRefId(overrideRequest.AppId, overrideRequest.EnvId, chartRef.Id) + span.End() + if err != nil { + impl.logger.Errorw("error in getting envConfigOverride for pipeline for specific chartVersion", "err", err, "appId", overrideRequest.AppId, "envId", overrideRequest.EnvId, "chartRefId", chartRef.Id) + return nil, err + } + + _, span = 
otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") + env, err := impl.envRepository.FindById(envOverride.TargetEnvironment) + span.End() + if err != nil { + impl.logger.Errorw("unable to find env", "err", err) + return nil, err + } + envOverride.Environment = env + + //updating historical data in envConfigOverride and appMetrics flag + envOverride.IsOverride = true + envOverride.EnvOverrideValues = deploymentTemplateHistory.Template + + resolvedTemplate, variableMap, err := impl.getResolvedTemplateWithSnapshot(deploymentTemplateHistory.Id, envOverride.EnvOverrideValues) + if err != nil { + return nil, err + } + envOverride.ResolvedEnvOverrideValues = resolvedTemplate + envOverride.VariableSnapshot = variableMap + + } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { + _, span := otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.ActiveEnvConfigOverride") + envOverride, err = impl.environmentConfigRepository.ActiveEnvConfigOverride(overrideRequest.AppId, overrideRequest.EnvId) + + var chart *chartRepoRepository.Chart + span.End() + if err != nil { + impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) + return nil, err + } + if envOverride.Id == 0 { + _, span = otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindLatestChartForAppByAppId") + chart, err = impl.chartRepository.FindLatestChartForAppByAppId(overrideRequest.AppId) + span.End() + if err != nil { + impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) + return nil, err + } + _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.FindChartByAppIdAndEnvIdAndChartRefId") + envOverride, err = impl.environmentConfigRepository.FindChartByAppIdAndEnvIdAndChartRefId(overrideRequest.AppId, overrideRequest.EnvId, chart.ChartRefId) + span.End() + if err != nil && !errors2.IsNotFound(err) { + impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) + return nil, err + 
} + + //creating new env override config + if errors2.IsNotFound(err) || envOverride == nil { + _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") + environment, err := impl.envRepository.FindById(overrideRequest.EnvId) + span.End() + if err != nil && !IsErrNoRows(err) { + return nil, err + } + envOverride = &chartConfig.EnvConfigOverride{ + Active: true, + ManualReviewed: true, + Status: models.CHARTSTATUS_SUCCESS, + TargetEnvironment: overrideRequest.EnvId, + ChartId: chart.Id, + AuditLog: sql.AuditLog{UpdatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId}, + Namespace: environment.Namespace, + IsOverride: false, + EnvOverrideValues: "{}", + Latest: false, + IsBasicViewLocked: chart.IsBasicViewLocked, + CurrentViewEditor: chart.CurrentViewEditor, + } + _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.Save") + err = impl.environmentConfigRepository.Save(envOverride) + span.End() + if err != nil { + impl.logger.Errorw("error in creating envconfig", "data", envOverride, "error", err) + return nil, err + } + } + envOverride.Chart = chart + } else if envOverride.Id > 0 && !envOverride.IsOverride { + _, span = otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindLatestChartForAppByAppId") + chart, err = impl.chartRepository.FindLatestChartForAppByAppId(overrideRequest.AppId) + span.End() + if err != nil { + impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) + return nil, err + } + envOverride.Chart = chart + } + + _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") + env, err := impl.envRepository.FindById(envOverride.TargetEnvironment) + span.End() + if err != nil { + impl.logger.Errorw("unable to find env", "err", err) + return nil, err + } + envOverride.Environment = env + + //VARIABLE different cases for variable resolution + scope := resourceQualifiers.Scope{ + AppId: overrideRequest.AppId, + EnvId: 
overrideRequest.EnvId, + ClusterId: overrideRequest.ClusterId, + SystemMetadata: &resourceQualifiers.SystemMetadata{ + EnvironmentName: env.Name, + ClusterName: env.Cluster.ClusterName, + Namespace: env.Namespace, + ImageTag: overrideRequest.ImageTag, + AppName: overrideRequest.AppName, + }, + } + + if envOverride.IsOverride { + + resolvedTemplate, variableMap, err := impl.extractVariablesAndResolveTemplate(scope, envOverride.EnvOverrideValues, repository6.Entity{ + EntityType: repository6.EntityTypeDeploymentTemplateEnvLevel, + EntityId: envOverride.Id, + }) + if err != nil { + return nil, err + } + envOverride.ResolvedEnvOverrideValues = resolvedTemplate + envOverride.VariableSnapshot = variableMap + } else { + resolvedTemplate, variableMap, err := impl.extractVariablesAndResolveTemplate(scope, chart.GlobalOverride, repository6.Entity{ + EntityType: repository6.EntityTypeDeploymentTemplateAppLevel, + EntityId: chart.Id, + }) + if err != nil { + return nil, err + } + envOverride.Chart.ResolvedGlobalOverride = resolvedTemplate + envOverride.VariableSnapshot = variableMap + } + } + + return envOverride, nil +} + +func (impl *AppServiceImpl) getResolvedTemplateWithSnapshot(deploymentTemplateHistoryId int, template string) (string, map[string]string, error) { + reference := repository6.HistoryReference{ + HistoryReferenceId: deploymentTemplateHistoryId, + HistoryReferenceType: repository6.HistoryReferenceTypeDeploymentTemplate, + } + variableSnapshot, err := impl.variableSnapshotHistoryService.GetVariableHistoryForReferences([]repository6.HistoryReference{reference}) + if err != nil { + return "", nil, err + } + + variableSnapshotMap := make(map[string]string) + + if _, ok := variableSnapshot[reference]; !ok { + return template, variableSnapshotMap, nil + } + + err = json.Unmarshal(variableSnapshot[reference].VariableSnapshot, &variableSnapshotMap) + if err != nil { + return "", nil, err + } + + if len(variableSnapshotMap) == 0 { + return template, 
variableSnapshotMap, nil + } + scopedVariableData := parsers.GetScopedVarData(variableSnapshotMap) + request := parsers.VariableParserRequest{Template: template, TemplateType: parsers.JsonVariableTemplate, Variables: scopedVariableData} + parserResponse := impl.variableTemplateParser.ParseTemplate(request) + err = parserResponse.Error + if err != nil { + return "", nil, err + } + resolvedTemplate := parserResponse.ResolvedTemplate + return resolvedTemplate, variableSnapshotMap, nil +} + +func (impl *AppServiceImpl) extractVariablesAndResolveTemplate(scope resourceQualifiers.Scope, template string, entity repository6.Entity) (string, map[string]string, error) { + + entityToVariables, err := impl.variableEntityMappingService.GetAllMappingsForEntities([]repository6.Entity{entity}) + if err != nil { + return "", nil, err + } + + variableMap := make(map[string]string) + if vars, ok := entityToVariables[entity]; !ok || len(vars) == 0 { + return template, variableMap, nil + } + scopedVariables, err := impl.scopedVariableService.GetScopedVariables(scope, entityToVariables[entity], true) + if err != nil { + return "", nil, err + } + + parserRequest := parsers.VariableParserRequest{Template: template, Variables: scopedVariables, TemplateType: parsers.JsonVariableTemplate} + parserResponse := impl.variableTemplateParser.ParseTemplate(parserRequest) + err = parserResponse.Error + if err != nil { + return "", nil, err + } + + for _, variable := range scopedVariables { + variableMap[variable.VariableName] = variable.VariableValue.StringValue() + } + + resolvedTemplate := parserResponse.ResolvedTemplate + return resolvedTemplate, variableMap, nil +} + +func (impl *AppServiceImpl) GetValuesOverrideForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*ValuesOverrideResponse, error) { + if overrideRequest.DeploymentType == models.DEPLOYMENTTYPE_UNKNOWN { + overrideRequest.DeploymentType = models.DEPLOYMENTTYPE_DEPLOY + } + if 
len(overrideRequest.DeploymentWithConfig) == 0 { + overrideRequest.DeploymentWithConfig = bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED + } + valuesOverrideResponse := &ValuesOverrideResponse{} + + pipeline, err := impl.pipelineRepository.FindById(overrideRequest.PipelineId) + if err != nil { + impl.logger.Errorw("error in fetching pipeline by pipeline id", "err", err, "pipeline-id-", overrideRequest.PipelineId) + return valuesOverrideResponse, err + } + + _, span := otel.Tracer("orchestrator").Start(ctx, "ciArtifactRepository.Get") + artifact, err := impl.ciArtifactRepository.Get(overrideRequest.CiArtifactId) + span.End() + if err != nil { + return valuesOverrideResponse, err + } + overrideRequest.ImageTag = artifact.Image + + envOverride, err := impl.GetEnvOverrideByTriggerType(overrideRequest, triggeredAt, ctx) + if err != nil { + impl.logger.Errorw("error in getting env override by trigger type", "err", err) + return valuesOverrideResponse, err + } + appMetrics, err := impl.GetAppMetricsByTriggerType(overrideRequest, ctx) + if err != nil { + impl.logger.Errorw("error in getting app metrics by trigger type", "err", err) + return valuesOverrideResponse, err + } + strategy, err := impl.GetDeploymentStrategyByTriggerType(overrideRequest, ctx) + if err != nil { + impl.logger.Errorw("error in getting strategy by trigger type", "err", err) + return valuesOverrideResponse, err + } + _, span = otel.Tracer("orchestrator").Start(ctx, "getDbMigrationOverride") + //FIXME: how to determine rollback + //we can't depend on ciArtifact ID because CI pipeline can be manually triggered in any order regardless of sourcecode status + dbMigrationOverride, err := impl.getDbMigrationOverride(overrideRequest, artifact, false) + span.End() + if err != nil { + impl.logger.Errorw("error in fetching db migration config", "req", overrideRequest, "err", err) + return valuesOverrideResponse, err + } + chartVersion := envOverride.Chart.ChartVersion + _, span = otel.Tracer("orchestrator").Start(ctx, 
"getConfigMapAndSecretJsonV2") + configMapJson, err := impl.getConfigMapAndSecretJsonV2(overrideRequest.AppId, envOverride.TargetEnvironment, overrideRequest.PipelineId, chartVersion, overrideRequest.DeploymentWithConfig, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + span.End() + if err != nil { + impl.logger.Errorw("error in fetching config map n secret ", "err", err) + configMapJson = nil + } + _, span = otel.Tracer("orchestrator").Start(ctx, "appCrudOperationService.GetLabelsByAppIdForDeployment") + appLabelJsonByte, err := impl.appCrudOperationService.GetLabelsByAppIdForDeployment(overrideRequest.AppId) + span.End() + if err != nil { + impl.logger.Errorw("error in fetching app labels for gitOps commit", "err", err) + appLabelJsonByte = nil + } + _, span = otel.Tracer("orchestrator").Start(ctx, "mergeAndSave") + pipelineOverride, err := impl.savePipelineOverride(overrideRequest, envOverride.Id, triggeredAt) + if err != nil { + return valuesOverrideResponse, err + } + //TODO: check status and apply lock + releaseOverrideJson, err := impl.getReleaseOverride(envOverride, overrideRequest, artifact, pipelineOverride, strategy, &appMetrics) + if err != nil { + return valuesOverrideResponse, err + } + mergedValues, err := impl.mergeOverrideValues(envOverride, dbMigrationOverride, releaseOverrideJson, configMapJson, appLabelJsonByte, strategy) + + appName := fmt.Sprintf("%s-%s", overrideRequest.AppName, envOverride.Environment.Name) + mergedValues = impl.autoscalingCheckBeforeTrigger(ctx, appName, envOverride.Namespace, mergedValues, overrideRequest) + + _, span = otel.Tracer("orchestrator").Start(ctx, "dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment") + // handle image pull secret if access given + mergedValues, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(envOverride.Environment, pipeline.CiPipelineId, mergedValues) + span.End() + if err != nil { + return valuesOverrideResponse, err + } + 
pipelineOverride.PipelineMergedValues = string(mergedValues) + err = impl.pipelineOverrideRepository.Update(pipelineOverride) + if err != nil { + return valuesOverrideResponse, err + } + //valuesOverrideResponse. + valuesOverrideResponse.MergedValues = string(mergedValues) + valuesOverrideResponse.EnvOverride = envOverride + valuesOverrideResponse.PipelineOverride = pipelineOverride + valuesOverrideResponse.AppMetrics = appMetrics + valuesOverrideResponse.PipelineStrategy = strategy + valuesOverrideResponse.ReleaseOverrideJSON = releaseOverrideJson + valuesOverrideResponse.Artifact = artifact + valuesOverrideResponse.Pipeline = pipeline + return valuesOverrideResponse, err +} + +func (impl *AppServiceImpl) BuildManifestForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (valuesOverrideResponse *ValuesOverrideResponse, builtChartPath string, err error) { + + valuesOverrideResponse = &ValuesOverrideResponse{} + valuesOverrideResponse, err = impl.GetValuesOverrideForTrigger(overrideRequest, triggeredAt, ctx) + if err != nil { + impl.logger.Errorw("error in fetching values for trigger", "err", err) + return valuesOverrideResponse, "", err + } + builtChartPath, err = impl.BuildChartAndGetPath(overrideRequest.AppName, valuesOverrideResponse.EnvOverride, ctx) + if err != nil { + impl.logger.Errorw("error in parsing reference chart", "err", err) + return valuesOverrideResponse, "", err + } + return valuesOverrideResponse, builtChartPath, err +} + func (impl *AppServiceImpl) GetDeployedManifestByPipelineIdAndCDWorkflowId(appId int, envId int, cdWorkflowId int, ctx context.Context) ([]byte, error) { manifestByteArray := make([]byte, 0) @@ -1056,29 +1713,244 @@ func (impl *AppServiceImpl) CreateGitopsRepo(app *app.App, userId int32) (gitops return gitOpsRepoName, chartGitAttr, nil } -func (impl *AppServiceImpl) saveTimeline(overrideRequest *bean.ValuesOverrideRequest, status string, statusDetail string, ctx context.Context) { - 
// creating cd pipeline status timeline for git commit - timeline := &pipelineConfig.PipelineStatusTimeline{ - CdWorkflowRunnerId: overrideRequest.WfrId, - Status: status, - StatusDetail: statusDetail, - StatusTime: time.Now(), - AuditLog: sql.AuditLog{ - CreatedBy: overrideRequest.UserId, - CreatedOn: time.Now(), - UpdatedBy: overrideRequest.UserId, - UpdatedOn: time.Now(), - }, - } - _, span := otel.Tracer("orchestrator").Start(ctx, "cdPipelineStatusTimelineRepo.SaveTimeline") - timelineErr := impl.pipelineStatusTimelineService.SaveTimeline(timeline, nil, false) +func (impl *AppServiceImpl) DeployArgocdApp(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *ValuesOverrideResponse, ctx context.Context) error { + + impl.logger.Debugw("new pipeline found", "pipeline", valuesOverrideResponse.Pipeline) + _, span := otel.Tracer("orchestrator").Start(ctx, "createArgoApplicationIfRequired") + name, err := impl.createArgoApplicationIfRequired(overrideRequest.AppId, valuesOverrideResponse.EnvOverride, valuesOverrideResponse.Pipeline, overrideRequest.UserId) span.End() - if timelineErr != nil { - impl.logger.Errorw("error in creating timeline status for git commit", "err", timelineErr, "timeline", timeline) + if err != nil { + impl.logger.Errorw("acd application create error on cd trigger", "err", err, "req", overrideRequest) + return err } -} + impl.logger.Debugw("argocd application created", "name", name) -func (impl *AppServiceImpl) autoHealChartLocationInChart(ctx context.Context, envOverride *chartConfig.EnvConfigOverride) error { + _, span = otel.Tracer("orchestrator").Start(ctx, "updateArgoPipeline") + updateAppInArgocd, err := impl.updateArgoPipeline(overrideRequest.AppId, valuesOverrideResponse.Pipeline.Name, valuesOverrideResponse.EnvOverride, ctx) + span.End() + if err != nil { + impl.logger.Errorw("error in updating argocd app ", "err", err) + return err + } + if updateAppInArgocd { + impl.logger.Debug("argo-cd successfully updated") + } else { 
+ impl.logger.Debug("argo-cd failed to update, ignoring it")
+ }
+ return nil
+}
+
+func (impl *AppServiceImpl) DeployApp(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *ValuesOverrideResponse, triggeredAt time.Time, ctx context.Context) error {
+
+ if IsAcdApp(overrideRequest.DeploymentAppType) {
+ _, span := otel.Tracer("orchestrator").Start(ctx, "DeployArgocdApp")
+ err := impl.DeployArgocdApp(overrideRequest, valuesOverrideResponse, ctx)
+ span.End()
+ if err != nil {
+ impl.logger.Errorw("error in deploying app on argocd", "err", err)
+ return err
+ }
+ } else if IsHelmApp(overrideRequest.DeploymentAppType) {
+ _, span := otel.Tracer("orchestrator").Start(ctx, "createHelmAppForCdPipeline")
+ _, err := impl.createHelmAppForCdPipeline(overrideRequest, valuesOverrideResponse, triggeredAt, ctx)
+ span.End()
+ if err != nil {
+ impl.logger.Errorw("error in creating or updating helm application for cd pipeline", "err", err)
+ return err
+ }
+ }
+ return nil
+}
+
+func (impl *AppServiceImpl) ValidateTriggerEvent(triggerEvent bean.TriggerEvent) (bool, error) {
+
+ switch triggerEvent.DeploymentAppType {
+ case bean2.ArgoCd:
+ if !triggerEvent.PerformChartPush {
+ return false, errors2.New("For deployment type ArgoCd, PerformChartPush flag expected value = true, got false")
+ }
+ case bean2.Helm:
+ return true, nil
+ case bean2.GitOpsWithoutDeployment:
+ if triggerEvent.PerformDeploymentOnCluster {
+ return false, errors2.New("For deployment type GitOpsWithoutDeployment, PerformDeploymentOnCluster flag expected value = false, got value = true")
+ }
+ case bean2.ManifestDownload:
+ if triggerEvent.PerformChartPush {
+ return false, errors2.New("For deployment type ManifestDownload, PerformChartPush flag expected value = false, got true")
+ }
+ if triggerEvent.PerformDeploymentOnCluster {
+ return false, errors2.New("For deployment type ManifestDownload, PerformDeploymentOnCluster flag expected value = false, got true")
+ }
+ }
+ return true, nil
+
+}
+
+// write integration/unit test for each function
+func (impl *AppServiceImpl) TriggerPipeline(overrideRequest *bean.ValuesOverrideRequest, triggerEvent bean.TriggerEvent, ctx context.Context) (releaseNo int, manifest []byte, err error) {
+
+ isRequestValid, err := impl.ValidateTriggerEvent(triggerEvent)
+ if !isRequestValid {
+ return releaseNo, manifest, err
+ }
+
+ valuesOverrideResponse, builtChartPath, err := impl.BuildManifestForTrigger(overrideRequest, triggerEvent.TriggerdAt, ctx)
+ if err != nil {
+ return releaseNo, manifest, err
+ }
+
+ _, span := otel.Tracer("orchestrator").Start(ctx, "CreateHistoriesForDeploymentTrigger")
+ err = impl.CreateHistoriesForDeploymentTrigger(valuesOverrideResponse.Pipeline, valuesOverrideResponse.PipelineStrategy, valuesOverrideResponse.EnvOverride, triggerEvent.TriggerdAt, triggerEvent.TriggeredBy)
+ span.End()
+
+ if triggerEvent.PerformChartPush {
+ manifestPushTemplate, err := impl.BuildManifestPushTemplate(overrideRequest, valuesOverrideResponse, builtChartPath, &manifest)
+ if err != nil {
+ impl.logger.Errorw("error in building manifest push template", "err", err)
+ return releaseNo, manifest, err
+ }
+ manifestPushService := impl.GetManifestPushService(triggerEvent)
+ manifestPushResponse := manifestPushService.PushChart(manifestPushTemplate, ctx)
+ if manifestPushResponse.Error != nil {
+ impl.logger.Errorw("Error in pushing manifest to git", "err", manifestPushResponse.Error, "git_repo_url", manifestPushTemplate.RepoUrl)
+ return releaseNo, manifest, manifestPushResponse.Error
+ }
+ pipelineOverrideUpdateRequest := &chartConfig.PipelineOverride{
+ Id: valuesOverrideResponse.PipelineOverride.Id,
+ GitHash: manifestPushResponse.CommitHash,
+ CommitTime: manifestPushResponse.CommitTime,
+ EnvConfigOverrideId: valuesOverrideResponse.EnvOverride.Id,
+ PipelineOverrideValues: valuesOverrideResponse.ReleaseOverrideJSON,
+ PipelineId: overrideRequest.PipelineId,
+ CiArtifactId: overrideRequest.CiArtifactId,
+ PipelineMergedValues: 
valuesOverrideResponse.MergedValues, + AuditLog: sql.AuditLog{UpdatedOn: triggerEvent.TriggerdAt, UpdatedBy: overrideRequest.UserId}, + } + _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineOverrideRepository.Update") + err = impl.pipelineOverrideRepository.Update(pipelineOverrideUpdateRequest) + span.End() + } + + if triggerEvent.PerformDeploymentOnCluster { + err = impl.DeployApp(overrideRequest, valuesOverrideResponse, triggerEvent.TriggerdAt, ctx) + if err != nil { + impl.logger.Errorw("error in deploying app", "err", err) + return releaseNo, manifest, err + } + } + + go impl.WriteCDTriggerEvent(overrideRequest, valuesOverrideResponse.Artifact, valuesOverrideResponse.PipelineOverride.PipelineReleaseCounter, valuesOverrideResponse.PipelineOverride.Id) + + _, spann := otel.Tracer("orchestrator").Start(ctx, "MarkImageScanDeployed") + _ = impl.MarkImageScanDeployed(overrideRequest.AppId, valuesOverrideResponse.EnvOverride.TargetEnvironment, valuesOverrideResponse.Artifact.ImageDigest, overrideRequest.ClusterId, valuesOverrideResponse.Artifact.ScanEnabled) + spann.End() + + middleware.CdTriggerCounter.WithLabelValues(overrideRequest.AppName, overrideRequest.EnvName).Inc() + + return valuesOverrideResponse.PipelineOverride.PipelineReleaseCounter, manifest, nil + +} + +func (impl *AppServiceImpl) GetTriggerEvent(deploymentAppType string, triggeredAt time.Time, deployedBy int32) bean.TriggerEvent { + // trigger event will decide whether to perform GitOps or deployment for a particular deployment app type + triggerEvent := bean.TriggerEvent{ + TriggeredBy: deployedBy, + TriggerdAt: triggeredAt, + } + switch deploymentAppType { + case bean2.ArgoCd: + triggerEvent.PerformChartPush = true + triggerEvent.PerformDeploymentOnCluster = true + triggerEvent.GetManifestInResponse = false + triggerEvent.DeploymentAppType = bean2.ArgoCd + triggerEvent.ManifestStorageType = bean2.ManifestStorageGit + case bean2.Helm: + triggerEvent.PerformChartPush = false + 
triggerEvent.PerformDeploymentOnCluster = true + triggerEvent.GetManifestInResponse = false + triggerEvent.DeploymentAppType = bean2.Helm + } + return triggerEvent +} + +func (impl *AppServiceImpl) TriggerRelease(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context, triggeredAt time.Time, deployedBy int32) (releaseNo int, manifest []byte, err error) { + triggerEvent := impl.GetTriggerEvent(overrideRequest.DeploymentAppType, triggeredAt, deployedBy) + releaseNo, manifest, err = impl.TriggerPipeline(overrideRequest, triggerEvent, ctx) + if err != nil { + return 0, manifest, err + } + return releaseNo, manifest, nil +} + +func (impl *AppServiceImpl) GetManifestPushService(triggerEvent bean.TriggerEvent) ManifestPushService { + var manifestPushService ManifestPushService + if triggerEvent.ManifestStorageType == bean2.ManifestStorageGit { + manifestPushService = impl.GitOpsManifestPushService + } + return manifestPushService +} + +func (impl *AppServiceImpl) BuildManifestPushTemplate(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *ValuesOverrideResponse, builtChartPath string, manifest *[]byte) (*bean3.ManifestPushTemplate, error) { + + manifestPushTemplate := &bean3.ManifestPushTemplate{ + WorkflowRunnerId: overrideRequest.WfrId, + AppId: overrideRequest.AppId, + ChartRefId: valuesOverrideResponse.EnvOverride.Chart.ChartRefId, + EnvironmentId: valuesOverrideResponse.EnvOverride.Environment.Id, + UserId: overrideRequest.UserId, + PipelineOverrideId: valuesOverrideResponse.PipelineOverride.Id, + AppName: overrideRequest.AppName, + TargetEnvironmentName: valuesOverrideResponse.EnvOverride.TargetEnvironment, + BuiltChartPath: builtChartPath, + BuiltChartBytes: manifest, + MergedValues: valuesOverrideResponse.MergedValues, + } + + manifestPushConfig, err := impl.manifestPushConfigRepository.GetManifestPushConfigByAppIdAndEnvId(overrideRequest.AppId, overrideRequest.EnvId) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error 
in fetching manifest push config from db", "err", err) + return manifestPushTemplate, err + } + + if manifestPushConfig != nil { + if manifestPushConfig.StorageType == bean2.ManifestStorageGit { + // need to implement for git repo push + // currently manifest push config doesn't have git push config. Gitops config is derived from charts, chart_env_config_override and chart_ref table + } + } else { + manifestPushTemplate.ChartReferenceTemplate = valuesOverrideResponse.EnvOverride.Chart.ReferenceTemplate + manifestPushTemplate.ChartName = valuesOverrideResponse.EnvOverride.Chart.ChartName + manifestPushTemplate.ChartVersion = valuesOverrideResponse.EnvOverride.Chart.ChartVersion + manifestPushTemplate.ChartLocation = valuesOverrideResponse.EnvOverride.Chart.ChartLocation + manifestPushTemplate.RepoUrl = valuesOverrideResponse.EnvOverride.Chart.GitRepoUrl + } + return manifestPushTemplate, err +} + +func (impl *AppServiceImpl) saveTimeline(overrideRequest *bean.ValuesOverrideRequest, status string, statusDetail string, ctx context.Context) { + // creating cd pipeline status timeline for git commit + timeline := &pipelineConfig.PipelineStatusTimeline{ + CdWorkflowRunnerId: overrideRequest.WfrId, + Status: status, + StatusDetail: statusDetail, + StatusTime: time.Now(), + AuditLog: sql.AuditLog{ + CreatedBy: overrideRequest.UserId, + CreatedOn: time.Now(), + UpdatedBy: overrideRequest.UserId, + UpdatedOn: time.Now(), + }, + } + _, span := otel.Tracer("orchestrator").Start(ctx, "cdPipelineStatusTimelineRepo.SaveTimeline") + timelineErr := impl.pipelineStatusTimelineService.SaveTimeline(timeline, nil, false) + span.End() + if timelineErr != nil { + impl.logger.Errorw("error in creating timeline status for git commit", "err", timelineErr, "timeline", timeline) + } +} + +func (impl *AppServiceImpl) autoHealChartLocationInChart(ctx context.Context, envOverride *chartConfig.EnvConfigOverride) error { chartId := envOverride.Chart.Id impl.logger.Infow("auto-healing: Chart 
location in chart not correct. modifying ", "chartId", chartId, "current chartLocation", envOverride.Chart.ChartLocation, "current chartVersion", envOverride.Chart.ChartVersion) @@ -1121,6 +1993,59 @@ func (impl *AppServiceImpl) autoHealChartLocationInChart(ctx context.Context, en return nil } +func (impl *AppServiceImpl) MarkImageScanDeployed(appId int, envId int, imageDigest string, clusterId int, isScanEnabled bool) error { + impl.logger.Debugw("mark image scan deployed for normal app, from cd auto or manual trigger", "imageDigest", imageDigest) + executionHistory, err := impl.imageScanHistoryRepository.FindByImageDigest(imageDigest) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching execution history", "err", err) + return err + } + if executionHistory == nil || executionHistory.Id == 0 { + impl.logger.Errorw("no execution history found for digest", "digest", imageDigest) + return fmt.Errorf("no execution history found for digest - %s", imageDigest) + } + impl.logger.Debugw("mark image scan deployed for normal app, from cd auto or manual trigger", "executionHistory", executionHistory) + var ids []int + ids = append(ids, executionHistory.Id) + + ot, err := impl.imageScanDeployInfoRepository.FindByTypeMetaAndTypeId(appId, security.ScanObjectType_APP) //todo insure this touple unique in db + if err != nil && err != pg.ErrNoRows { + return err + } else if err == pg.ErrNoRows && isScanEnabled { + imageScanDeployInfo := &security.ImageScanDeployInfo{ + ImageScanExecutionHistoryId: ids, + ScanObjectMetaId: appId, + ObjectType: security.ScanObjectType_APP, + EnvId: envId, + ClusterId: clusterId, + AuditLog: sql.AuditLog{ + CreatedOn: time.Now(), + CreatedBy: 1, + UpdatedOn: time.Now(), + UpdatedBy: 1, + }, + } + impl.logger.Debugw("mark image scan deployed for normal app, from cd auto or manual trigger", "imageScanDeployInfo", imageScanDeployInfo) + err = impl.imageScanDeployInfoRepository.Save(imageScanDeployInfo) + if err != nil { + 
impl.logger.Errorw("error in creating deploy info", "err", err) + } + } else { + // Updating Execution history for Latest Deployment to fetch out security Vulnerabilities for latest deployed info + if isScanEnabled { + ot.ImageScanExecutionHistoryId = ids + } else { + arr := []int{-1} + ot.ImageScanExecutionHistoryId = arr + } + err = impl.imageScanDeployInfoRepository.Update(ot) + if err != nil { + impl.logger.Errorw("error in updating deploy info for latest deployed image", "err", err) + } + } + return err +} + // FIXME tmp workaround func (impl *AppServiceImpl) GetCmSecretNew(appId int, envId int, isJob bool) (*bean.ConfigMapJson, *bean.ConfigSecretJson, error) { var configMapJson string @@ -1245,6 +2170,99 @@ func (impl *AppServiceImpl) GetConfigMapAndSecretJson(appId int, envId int, pipe return merged, nil } +func (impl *AppServiceImpl) getConfigMapAndSecretJsonV2(appId int, envId int, pipelineId int, chartVersion string, deploymentWithConfig bean.DeploymentConfigurationType, wfrIdForDeploymentWithSpecificTrigger int) ([]byte, error) { + + var configMapJson string + var secretDataJson string + var configMapJsonApp string + var secretDataJsonApp string + var configMapJsonEnv string + var secretDataJsonEnv string + var err error + //var configMapJsonPipeline string + //var secretDataJsonPipeline string + + merged := []byte("{}") + if deploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { + configMapA, err := impl.configMapRepository.GetByAppIdAppLevel(appId) + if err != nil && pg.ErrNoRows != err { + return []byte("{}"), err + } + if configMapA != nil && configMapA.Id > 0 { + configMapJsonApp = configMapA.ConfigMapData + secretDataJsonApp = configMapA.SecretData + } + configMapE, err := impl.configMapRepository.GetByAppIdAndEnvIdEnvLevel(appId, envId) + if err != nil && pg.ErrNoRows != err { + return []byte("{}"), err + } + if configMapE != nil && configMapE.Id > 0 { + configMapJsonEnv = configMapE.ConfigMapData + secretDataJsonEnv = 
configMapE.SecretData + } + } else if deploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { + //fetching history and setting envLevelConfig and not appLevelConfig because history already contains merged appLevel and envLevel configs + configMapHistory, err := impl.configMapHistoryRepository.GetHistoryByPipelineIdAndWfrId(pipelineId, wfrIdForDeploymentWithSpecificTrigger, repository3.CONFIGMAP_TYPE) + if err != nil { + impl.logger.Errorw("error in getting config map history config by pipelineId and wfrId ", "err", err, "pipelineId", pipelineId, "wfrid", wfrIdForDeploymentWithSpecificTrigger) + return []byte("{}"), err + } + configMapJsonEnv = configMapHistory.Data + secretHistory, err := impl.configMapHistoryRepository.GetHistoryByPipelineIdAndWfrId(pipelineId, wfrIdForDeploymentWithSpecificTrigger, repository3.SECRET_TYPE) + if err != nil { + impl.logger.Errorw("error in getting config map history config by pipelineId and wfrId ", "err", err, "pipelineId", pipelineId, "wfrid", wfrIdForDeploymentWithSpecificTrigger) + return []byte("{}"), err + } + secretDataJsonEnv = secretHistory.Data + } + configMapJson, err = impl.mergeUtil.ConfigMapMerge(configMapJsonApp, configMapJsonEnv) + if err != nil { + return []byte("{}"), err + } + chartMajorVersion, chartMinorVersion, err := util2.ExtractChartVersion(chartVersion) + if err != nil { + impl.logger.Errorw("chart version parsing", "err", err) + return []byte("{}"), err + } + secretDataJson, err = impl.mergeUtil.ConfigSecretMerge(secretDataJsonApp, secretDataJsonEnv, chartMajorVersion, chartMinorVersion, false) + if err != nil { + return []byte("{}"), err + } + configResponseR := bean.ConfigMapRootJson{} + configResponse := bean.ConfigMapJson{} + if configMapJson != "" { + err = json.Unmarshal([]byte(configMapJson), &configResponse) + if err != nil { + return []byte("{}"), err + } + } + configResponseR.ConfigMapJson = configResponse + secretResponseR := bean.ConfigSecretRootJson{} + secretResponse := 
bean.ConfigSecretJson{} + if configMapJson != "" { + err = json.Unmarshal([]byte(secretDataJson), &secretResponse) + if err != nil { + return []byte("{}"), err + } + } + secretResponseR.ConfigSecretJson = secretResponse + + configMapByte, err := json.Marshal(configResponseR) + if err != nil { + return []byte("{}"), err + } + secretDataByte, err := json.Marshal(secretResponseR) + if err != nil { + return []byte("{}"), err + } + + merged, err = impl.mergeUtil.JsonPatch(configMapByte, secretDataByte) + if err != nil { + return []byte("{}"), err + } + return merged, nil +} + func (impl *AppServiceImpl) synchCD(pipeline *pipelineConfig.Pipeline, ctx context.Context, overrideRequest *bean.ValuesOverrideRequest, envOverride *chartConfig.EnvConfigOverride) { req := new(application2.ApplicationSyncRequest) @@ -1261,6 +2279,44 @@ func (impl *AppServiceImpl) synchCD(pipeline *pipelineConfig.Pipeline, ctx conte } } +func (impl *AppServiceImpl) WriteCDTriggerEvent(overrideRequest *bean.ValuesOverrideRequest, artifact *repository.CiArtifact, releaseId, pipelineOverrideId int) { + + event := impl.eventFactory.Build(util.Trigger, &overrideRequest.PipelineId, overrideRequest.AppId, &overrideRequest.EnvId, util.CD) + impl.logger.Debugw("event WriteCDTriggerEvent", "event", event) + event = impl.eventFactory.BuildExtraCDData(event, nil, pipelineOverrideId, bean.CD_WORKFLOW_TYPE_DEPLOY) + _, evtErr := impl.eventClient.WriteNotificationEvent(event) + if evtErr != nil { + impl.logger.Errorw("CD trigger event not sent", "error", evtErr) + } + deploymentEvent := DeploymentEvent{ + ApplicationId: overrideRequest.AppId, + EnvironmentId: overrideRequest.EnvId, //check for production Environment + ReleaseId: releaseId, + PipelineOverrideId: pipelineOverrideId, + TriggerTime: time.Now(), + CiArtifactId: overrideRequest.CiArtifactId, + } + ciPipelineMaterials, err := impl.ciPipelineMaterialRepository.GetByPipelineId(artifact.PipelineId) + if err != nil { + impl.logger.Errorw("error in ") + } + 
materialInfoMap, mErr := artifact.ParseMaterialInfo() + if mErr != nil { + impl.logger.Errorw("material info map error", mErr) + return + } + for _, ciPipelineMaterial := range ciPipelineMaterials { + hash := materialInfoMap[ciPipelineMaterial.GitMaterial.Url] + pipelineMaterialInfo := &PipelineMaterialInfo{PipelineMaterialId: ciPipelineMaterial.Id, CommitHash: hash} + deploymentEvent.PipelineMaterials = append(deploymentEvent.PipelineMaterials, pipelineMaterialInfo) + } + impl.logger.Infow("triggering deployment event", "event", deploymentEvent) + err = impl.eventClient.WriteNatsEvent(pubsub.CD_SUCCESS, deploymentEvent) + if err != nil { + impl.logger.Errorw("error in writing cd trigger event", "err", err) + } +} + type DeploymentEvent struct { ApplicationId int EnvironmentId int @@ -1309,6 +2365,358 @@ type ReleaseAttributes struct { AppMetrics *bool } +func (impl *AppServiceImpl) getReleaseOverride(envOverride *chartConfig.EnvConfigOverride, overrideRequest *bean.ValuesOverrideRequest, artifact *repository.CiArtifact, pipelineOverride *chartConfig.PipelineOverride, strategy *chartConfig.PipelineStrategy, appMetrics *bool) (releaseOverride string, err error) { + + artifactImage := artifact.Image + imageTag := strings.Split(artifactImage, ":") + + imageTagLen := len(imageTag) + + imageName := "" + + for i := 0; i < imageTagLen-1; i++ { + if i != imageTagLen-2 { + imageName = imageName + imageTag[i] + ":" + } else { + imageName = imageName + imageTag[i] + } + } + + appId := strconv.Itoa(overrideRequest.AppId) + envId := strconv.Itoa(overrideRequest.EnvId) + + deploymentStrategy := "" + if strategy != nil { + deploymentStrategy = string(strategy.Strategy) + } + releaseAttribute := ReleaseAttributes{ + Name: imageName, + Tag: imageTag[imageTagLen-1], + PipelineName: overrideRequest.PipelineName, + ReleaseVersion: strconv.Itoa(pipelineOverride.PipelineReleaseCounter), + DeploymentType: deploymentStrategy, + App: appId, + Env: envId, + AppMetrics: appMetrics, + } + 
override, err := util2.Tprintf(envOverride.Chart.ImageDescriptorTemplate, releaseAttribute) + if err != nil { + return "", &ApiError{InternalMessage: "unable to render ImageDescriptorTemplate"} + } + if overrideRequest.AdditionalOverride != nil { + userOverride, err := overrideRequest.AdditionalOverride.MarshalJSON() + if err != nil { + return "", err + } + data, err := impl.mergeUtil.JsonPatch(userOverride, []byte(override)) + if err != nil { + return "", err + } + override = string(data) + } + return override, nil +} + +func (impl *AppServiceImpl) mergeOverrideValues(envOverride *chartConfig.EnvConfigOverride, + dbMigrationOverride []byte, + releaseOverrideJson string, + configMapJson []byte, + appLabelJsonByte []byte, + strategy *chartConfig.PipelineStrategy, +) (mergedValues []byte, err error) { + + //merge three values on the fly + //ordering is important here + //global < environment < db< release + var merged []byte + if !envOverride.IsOverride { + merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.Chart.ResolvedGlobalOverride)) + if err != nil { + return nil, err + } + } else { + merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.ResolvedEnvOverrideValues)) + if err != nil { + return nil, err + } + } + if strategy != nil && len(strategy.Config) > 0 { + merged, err = impl.mergeUtil.JsonPatch(merged, []byte(strategy.Config)) + if err != nil { + return nil, err + } + } + merged, err = impl.mergeUtil.JsonPatch(merged, dbMigrationOverride) + if err != nil { + return nil, err + } + merged, err = impl.mergeUtil.JsonPatch(merged, []byte(releaseOverrideJson)) + if err != nil { + return nil, err + } + if configMapJson != nil { + merged, err = impl.mergeUtil.JsonPatch(merged, configMapJson) + if err != nil { + return nil, err + } + } + if appLabelJsonByte != nil { + merged, err = impl.mergeUtil.JsonPatch(merged, appLabelJsonByte) + if err != nil { + return nil, err + } + } + return merged, nil +} + +func (impl *AppServiceImpl) 
mergeAndSave(envOverride *chartConfig.EnvConfigOverride, + overrideRequest *bean.ValuesOverrideRequest, + dbMigrationOverride []byte, + artifact *repository.CiArtifact, + pipeline *pipelineConfig.Pipeline, configMapJson, appLabelJsonByte []byte, strategy *chartConfig.PipelineStrategy, ctx context.Context, + triggeredAt time.Time, deployedBy int32, appMetrics *bool) (releaseId int, overrideId int, mergedValues string, err error) { + + //register release , obtain release id TODO: populate releaseId to template + override, err := impl.savePipelineOverride(overrideRequest, envOverride.Id, triggeredAt) + if err != nil { + return 0, 0, "", err + } + //TODO: check status and apply lock + overrideJson, err := impl.getReleaseOverride(envOverride, overrideRequest, artifact, override, strategy, appMetrics) + if err != nil { + return 0, 0, "", err + } + + //merge three values on the fly + //ordering is important here + //global < environment < db< release + var merged []byte + if !envOverride.IsOverride { + merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.Chart.GlobalOverride)) + if err != nil { + return 0, 0, "", err + } + } else { + merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.EnvOverrideValues)) + if err != nil { + return 0, 0, "", err + } + } + + //pipeline override here comes from pipeline strategy table + if strategy != nil && len(strategy.Config) > 0 { + merged, err = impl.mergeUtil.JsonPatch(merged, []byte(strategy.Config)) + if err != nil { + return 0, 0, "", err + } + } + merged, err = impl.mergeUtil.JsonPatch(merged, dbMigrationOverride) + if err != nil { + return 0, 0, "", err + } + merged, err = impl.mergeUtil.JsonPatch(merged, []byte(overrideJson)) + if err != nil { + return 0, 0, "", err + } + + if configMapJson != nil { + merged, err = impl.mergeUtil.JsonPatch(merged, configMapJson) + if err != nil { + return 0, 0, "", err + } + } + + if appLabelJsonByte != nil { + merged, err = impl.mergeUtil.JsonPatch(merged, 
appLabelJsonByte) + if err != nil { + return 0, 0, "", err + } + } + + appName := fmt.Sprintf("%s-%s", pipeline.App.AppName, envOverride.Environment.Name) + merged = impl.autoscalingCheckBeforeTrigger(ctx, appName, envOverride.Namespace, merged, overrideRequest) + + _, span := otel.Tracer("orchestrator").Start(ctx, "dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment") + // handle image pull secret if access given + merged, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(envOverride.Environment, pipeline.CiPipelineId, merged) + span.End() + if err != nil { + return 0, 0, "", err + } + + commitHash := "" + commitTime := time.Time{} + if IsAcdApp(pipeline.DeploymentAppType) { + chartRepoName := impl.chartTemplateService.GetGitOpsRepoNameFromUrl(envOverride.Chart.GitRepoUrl) + _, span = otel.Tracer("orchestrator").Start(ctx, "chartTemplateService.GetUserEmailIdAndNameForGitOpsCommit") + //getting username & emailId for commit author data + userEmailId, userName := impl.chartTemplateService.GetUserEmailIdAndNameForGitOpsCommit(overrideRequest.UserId) + span.End() + chartGitAttr := &ChartConfig{ + FileName: fmt.Sprintf("_%d-values.yaml", envOverride.TargetEnvironment), + FileContent: string(merged), + ChartName: envOverride.Chart.ChartName, + ChartLocation: envOverride.Chart.ChartLocation, + ChartRepoName: chartRepoName, + ReleaseMessage: fmt.Sprintf("release-%d-env-%d ", override.Id, envOverride.TargetEnvironment), + UserName: userName, + UserEmailId: userEmailId, + } + gitOpsConfigBitbucket, err := impl.gitOpsConfigRepository.GetGitOpsConfigByProvider(BITBUCKET_PROVIDER) + if err != nil { + if err == pg.ErrNoRows { + gitOpsConfigBitbucket.BitBucketWorkspaceId = "" + } else { + return 0, 0, "", err + } + } + gitOpsConfig := &bean.GitOpsConfigDto{BitBucketWorkspaceId: gitOpsConfigBitbucket.BitBucketWorkspaceId} + _, span = otel.Tracer("orchestrator").Start(ctx, "gitFactory.Client.CommitValues") + commitHash, 
commitTime, err = impl.gitFactory.Client.CommitValues(chartGitAttr, gitOpsConfig) + span.End() + if err != nil { + impl.logger.Errorw("error in git commit", "err", err) + return 0, 0, "", err + } + } + if commitTime.IsZero() { + commitTime = time.Now() + } + pipelineOverride := &chartConfig.PipelineOverride{ + Id: override.Id, + GitHash: commitHash, + CommitTime: commitTime, + EnvConfigOverrideId: envOverride.Id, + PipelineOverrideValues: overrideJson, + PipelineId: overrideRequest.PipelineId, + CiArtifactId: overrideRequest.CiArtifactId, + PipelineMergedValues: string(merged), + AuditLog: sql.AuditLog{UpdatedOn: triggeredAt, UpdatedBy: deployedBy}, + } + _, span = otel.Tracer("orchestrator").Start(ctx, "pipelineOverrideRepository.Update") + err = impl.pipelineOverrideRepository.Update(pipelineOverride) + span.End() + if err != nil { + return 0, 0, "", err + } + mergedValues = string(merged) + return override.PipelineReleaseCounter, override.Id, mergedValues, nil +} + +func (impl *AppServiceImpl) savePipelineOverride(overrideRequest *bean.ValuesOverrideRequest, envOverrideId int, triggeredAt time.Time) (override *chartConfig.PipelineOverride, err error) { + currentReleaseNo, err := impl.pipelineOverrideRepository.GetCurrentPipelineReleaseCounter(overrideRequest.PipelineId) + if err != nil { + return nil, err + } + po := &chartConfig.PipelineOverride{ + EnvConfigOverrideId: envOverrideId, + Status: models.CHARTSTATUS_NEW, + PipelineId: overrideRequest.PipelineId, + CiArtifactId: overrideRequest.CiArtifactId, + PipelineReleaseCounter: currentReleaseNo + 1, + CdWorkflowId: overrideRequest.CdWorkflowId, + AuditLog: sql.AuditLog{CreatedBy: overrideRequest.UserId, CreatedOn: triggeredAt, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, + DeploymentType: overrideRequest.DeploymentType, + } + + err = impl.pipelineOverrideRepository.Save(po) + if err != nil { + return nil, err + } + err = impl.checkAndFixDuplicateReleaseNo(po) + if err != nil { + 
impl.logger.Errorw("error in checking release no duplicacy", "pipeline", po, "err", err)
+ return nil, err
+ }
+ return po, nil
+}
+
+func (impl *AppServiceImpl) checkAndFixDuplicateReleaseNo(override *chartConfig.PipelineOverride) error {
+
+ uniqueVerified := false
+ retryCount := 0
+
+ for !uniqueVerified && retryCount < 5 {
+ retryCount = retryCount + 1
+ overrides, err := impl.pipelineOverrideRepository.GetByPipelineIdAndReleaseNo(override.PipelineId, override.PipelineReleaseCounter)
+ if err != nil {
+ return err
+ }
+ if overrides[0].Id == override.Id {
+ uniqueVerified = true
+ } else {
+ //duplicate might be due to concurrency, lets fix it
+ currentReleaseNo, err := impl.pipelineOverrideRepository.GetCurrentPipelineReleaseCounter(override.PipelineId)
+ if err != nil {
+ return err
+ }
+ override.PipelineReleaseCounter = currentReleaseNo + 1
+ err = impl.pipelineOverrideRepository.Save(override)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ if !uniqueVerified {
+ return fmt.Errorf("duplicate verification retry count exceeded max overrideId: %d ,count: %d", override.Id, retryCount)
+ }
+ return nil
+}
+
+func (impl *AppServiceImpl) updateArgoPipeline(appId int, pipelineName string, envOverride *chartConfig.EnvConfigOverride, ctx context.Context) (bool, error) {
+ //repo has been registered while helm create
+ if ctx == nil {
+ impl.logger.Errorw("err in syncing ACD, ctx is NULL", "pipelineName", pipelineName)
+ return false, nil
+ }
+ app, err := impl.appRepository.FindById(appId)
+ if err != nil {
+ impl.logger.Errorw("no app found ", "err", err)
+ return false, err
+ }
+ envModel, err := impl.envRepository.FindById(envOverride.TargetEnvironment)
+ if err != nil {
+ return false, err
+ }
+ argoAppName := fmt.Sprintf("%s-%s", app.AppName, envModel.Name)
+ impl.logger.Infow("received payload, updateArgoPipeline", "appId", appId, "pipelineName", pipelineName, "envId", envOverride.TargetEnvironment, "argoAppName", argoAppName, "context", ctx)
+ application, err := 
impl.acdClient.Get(ctx, &application2.ApplicationQuery{Name: &argoAppName}) + if err != nil { + impl.logger.Errorw("no argo app exists", "app", argoAppName, "pipeline", pipelineName) + return false, err + } + //if status, ok:=status.FromError(err);ok{ + appStatus, _ := status.FromError(err) + + if appStatus.Code() == codes.OK { + impl.logger.Debugw("argo app exists", "app", argoAppName, "pipeline", pipelineName) + if application.Spec.Source.Path != envOverride.Chart.ChartLocation || application.Spec.Source.TargetRevision != "master" { + patchReq := v1alpha1.Application{Spec: v1alpha1.ApplicationSpec{Source: v1alpha1.ApplicationSource{Path: envOverride.Chart.ChartLocation, RepoURL: envOverride.Chart.GitRepoUrl, TargetRevision: "master"}}} + reqbyte, err := json.Marshal(patchReq) + if err != nil { + impl.logger.Errorw("error in creating patch", "err", err) + } + reqString := string(reqbyte) + patchType := "merge" + _, err = impl.acdClient.Patch(ctx, &application2.ApplicationPatchRequest{Patch: &reqString, Name: &argoAppName, PatchType: &patchType}) + if err != nil { + impl.logger.Errorw("error in creating argo pipeline ", "name", pipelineName, "patch", string(reqbyte), "err", err) + return false, err + } + impl.logger.Debugw("pipeline update req ", "res", patchReq) + } else { + impl.logger.Debug("pipeline no need to update ") + } + return true, nil + } else if appStatus.Code() == codes.NotFound { + impl.logger.Errorw("argo app not found", "app", argoAppName, "pipeline", pipelineName) + return false, nil + } else { + impl.logger.Errorw("err in checking application on gocd", "err", err, "pipeline", pipelineName) + return false, err + } +} + func (impl *AppServiceImpl) UpdateInstalledAppVersionHistoryByACDObject(app *v1alpha1.Application, installedAppVersionHistoryId int, updateTimedOutStatus bool) error { installedAppVersionHistory, err := impl.installedAppVersionHistoryRepository.GetInstalledAppVersionHistory(installedAppVersionHistoryId) if err != nil { @@ -1376,6 
+2784,443 @@ const nameOverride = "nameOverride" const enabled = "enabled" const replicaCount = "replicaCount" +func (impl *AppServiceImpl) getAutoScalingReplicaCount(templateMap map[string]interface{}, appName string) *util2.HpaResourceRequest { + hasOverride := false + if _, ok := templateMap[fullnameOverride]; ok { + appNameOverride := templateMap[fullnameOverride].(string) + if len(appNameOverride) > 0 { + appName = appNameOverride + hasOverride = true + } + } + if !hasOverride { + if _, ok := templateMap[nameOverride]; ok { + nameOverride := templateMap[nameOverride].(string) + if len(nameOverride) > 0 { + appName = fmt.Sprintf("%s-%s", appName, nameOverride) + } + } + } + hpaResourceRequest := &util2.HpaResourceRequest{} + hpaResourceRequest.Version = "" + hpaResourceRequest.Group = autoscaling.ServiceName + hpaResourceRequest.Kind = HorizontalPodAutoscaler + impl.logger.Infow("getAutoScalingReplicaCount", "hpaResourceRequest", hpaResourceRequest) + if _, ok := templateMap[kedaAutoscaling]; ok { + as := templateMap[kedaAutoscaling] + asd := as.(map[string]interface{}) + if _, ok := asd[enabled]; ok { + impl.logger.Infow("getAutoScalingReplicaCount", "hpaResourceRequest", hpaResourceRequest) + enable := asd[enabled].(bool) + if enable { + hpaResourceRequest.IsEnable = enable + hpaResourceRequest.ReqReplicaCount = templateMap[replicaCount].(float64) + hpaResourceRequest.ReqMaxReplicas = asd["maxReplicaCount"].(float64) + hpaResourceRequest.ReqMinReplicas = asd["minReplicaCount"].(float64) + hpaResourceRequest.ResourceName = fmt.Sprintf("%s-%s-%s", "keda-hpa", appName, "keda") + impl.logger.Infow("getAutoScalingReplicaCount", "hpaResourceRequest", hpaResourceRequest) + return hpaResourceRequest + } + } + } + + if _, ok := templateMap[autoscaling.ServiceName]; ok { + as := templateMap[autoscaling.ServiceName] + asd := as.(map[string]interface{}) + if _, ok := asd[enabled]; ok { + enable := asd[enabled].(bool) + if enable { + hpaResourceRequest.IsEnable = 
asd[enabled].(bool) + hpaResourceRequest.ReqReplicaCount = templateMap[replicaCount].(float64) + hpaResourceRequest.ReqMaxReplicas = asd["MaxReplicas"].(float64) + hpaResourceRequest.ReqMinReplicas = asd["MinReplicas"].(float64) + hpaResourceRequest.ResourceName = fmt.Sprintf("%s-%s", appName, "hpa") + return hpaResourceRequest + } + } + } + return hpaResourceRequest + +} + +func (impl *AppServiceImpl) autoscalingCheckBeforeTrigger(ctx context.Context, appName string, namespace string, merged []byte, overrideRequest *bean.ValuesOverrideRequest) []byte { + //pipeline := overrideRequest.Pipeline + var appId = overrideRequest.AppId + pipelineId := overrideRequest.PipelineId + var appDeploymentType = overrideRequest.DeploymentAppType + var clusterId = overrideRequest.ClusterId + deploymentType := overrideRequest.DeploymentType + templateMap := make(map[string]interface{}) + err := json.Unmarshal(merged, &templateMap) + if err != nil { + return merged + } + + hpaResourceRequest := impl.getAutoScalingReplicaCount(templateMap, appName) + impl.logger.Debugw("autoscalingCheckBeforeTrigger", "hpaResourceRequest", hpaResourceRequest) + if hpaResourceRequest.IsEnable { + resourceManifest := make(map[string]interface{}) + if IsAcdApp(appDeploymentType) { + query := &application2.ApplicationResourceRequest{ + Name: &appName, + Version: &hpaResourceRequest.Version, + Group: &hpaResourceRequest.Group, + Kind: &hpaResourceRequest.Kind, + ResourceName: &hpaResourceRequest.ResourceName, + Namespace: &namespace, + } + recv, err := impl.acdClient.GetResource(ctx, query) + impl.logger.Debugw("resource manifest get replica count", "response", recv) + if err != nil { + impl.logger.Errorw("ACD Get Resource API Failed", "err", err) + middleware.AcdGetResourceCounter.WithLabelValues(strconv.Itoa(appId), namespace, appName).Inc() + return merged + } + if recv != nil && len(*recv.Manifest) > 0 { + err := json.Unmarshal([]byte(*recv.Manifest), &resourceManifest) + if err != nil { + 
impl.logger.Errorw("unmarshal failed for hpa check", "err", err) + return merged + } + } + } else { + version := "v2beta2" + k8sResource, err := impl.K8sCommonService.GetResource(ctx, &k8s.ResourceRequestBean{ClusterId: clusterId, + K8sRequest: &k8s2.K8sRequestBean{ResourceIdentifier: k8s2.ResourceIdentifier{Name: hpaResourceRequest.ResourceName, + Namespace: namespace, GroupVersionKind: schema.GroupVersionKind{Group: hpaResourceRequest.Group, Kind: hpaResourceRequest.Kind, Version: version}}}}) + if err != nil { + impl.logger.Errorw("error occurred while fetching resource for app", "resourceName", hpaResourceRequest.ResourceName, "err", err) + return merged + } + resourceManifest = k8sResource.Manifest.Object + } + if len(resourceManifest) > 0 { + statusMap := resourceManifest["status"].(map[string]interface{}) + currentReplicaVal := statusMap["currentReplicas"] + currentReplicaCount, err := util2.ParseFloatNumber(currentReplicaVal) + if err != nil { + impl.logger.Errorw("error occurred while parsing replica count", "currentReplicas", currentReplicaVal, "err", err) + return merged + } + + reqReplicaCount := impl.fetchRequiredReplicaCount(currentReplicaCount, hpaResourceRequest.ReqMaxReplicas, hpaResourceRequest.ReqMinReplicas) + templateMap["replicaCount"] = reqReplicaCount + merged, err = json.Marshal(&templateMap) + if err != nil { + impl.logger.Errorw("marshaling failed for hpa check", "err", err) + return merged + } + } + } else { + impl.logger.Errorw("autoscaling is not enabled", "pipelineId", pipelineId) + } + + //check for custom chart support + if autoscalingEnabledPath, ok := templateMap[bean2.CustomAutoScalingEnabledPathKey]; ok { + if deploymentType == models.DEPLOYMENTTYPE_STOP { + merged, err = impl.setScalingValues(templateMap, bean2.CustomAutoScalingEnabledPathKey, merged, false) + if err != nil { + return merged + } + merged, err = impl.setScalingValues(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged, 0) + if err != nil { + return 
merged + } + } else { + autoscalingEnabled := false + autoscalingEnabledValue := gjson.Get(string(merged), autoscalingEnabledPath.(string)).Value() + if val, ok := autoscalingEnabledValue.(bool); ok { + autoscalingEnabled = val + } + if autoscalingEnabled { + // extract replica count, min, max and check for required value + replicaCount, err := impl.getReplicaCountFromCustomChart(templateMap, merged) + if err != nil { + return merged + } + merged, err = impl.setScalingValues(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged, replicaCount) + if err != nil { + return merged + } + } + } + } + + return merged +} + +func (impl *AppServiceImpl) getReplicaCountFromCustomChart(templateMap map[string]interface{}, merged []byte) (float64, error) { + autoscalingMinVal, err := impl.extractParamValue(templateMap, bean2.CustomAutoscalingMinPathKey, merged) + if err != nil { + return 0, err + } + autoscalingMaxVal, err := impl.extractParamValue(templateMap, bean2.CustomAutoscalingMaxPathKey, merged) + if err != nil { + return 0, err + } + autoscalingReplicaCountVal, err := impl.extractParamValue(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged) + if err != nil { + return 0, err + } + return impl.fetchRequiredReplicaCount(autoscalingReplicaCountVal, autoscalingMaxVal, autoscalingMinVal), nil +} + +func (impl *AppServiceImpl) extractParamValue(inputMap map[string]interface{}, key string, merged []byte) (float64, error) { + if _, ok := inputMap[key]; !ok { + return 0, errors.New("empty-val-err") + } + floatNumber, err := util2.ParseFloatNumber(gjson.Get(string(merged), inputMap[key].(string)).Value()) + if err != nil { + impl.logger.Errorw("error occurred while parsing float number", "key", key, "err", err) + } + return floatNumber, err +} + +func (impl *AppServiceImpl) setScalingValues(templateMap map[string]interface{}, customScalingKey string, merged []byte, value interface{}) ([]byte, error) { + autoscalingJsonPath := templateMap[customScalingKey] + 
autoscalingJsonPathKey := autoscalingJsonPath.(string) + mergedRes, err := sjson.Set(string(merged), autoscalingJsonPathKey, value) + if err != nil { + impl.logger.Errorw("error occurred while setting autoscaling key", "JsonPathKey", autoscalingJsonPathKey, "err", err) + return []byte{}, err + } + return []byte(mergedRes), nil +} + +func (impl *AppServiceImpl) fetchRequiredReplicaCount(currentReplicaCount float64, reqMaxReplicas float64, reqMinReplicas float64) float64 { + var reqReplicaCount float64 + if currentReplicaCount <= reqMaxReplicas && currentReplicaCount >= reqMinReplicas { + reqReplicaCount = currentReplicaCount + } else if currentReplicaCount > reqMaxReplicas { + reqReplicaCount = reqMaxReplicas + } else if currentReplicaCount < reqMinReplicas { + reqReplicaCount = reqMinReplicas + } + return reqReplicaCount +} + +func (impl *AppServiceImpl) CreateHistoriesForDeploymentTrigger(pipeline *pipelineConfig.Pipeline, strategy *chartConfig.PipelineStrategy, envOverride *chartConfig.EnvConfigOverride, deployedOn time.Time, deployedBy int32) error { + //creating history for deployment template + deploymentTemplateHistory, err := impl.deploymentTemplateHistoryService.CreateDeploymentTemplateHistoryForDeploymentTrigger(pipeline, envOverride, envOverride.Chart.ImageDescriptorTemplate, deployedOn, deployedBy) + if err != nil { + impl.logger.Errorw("error in creating deployment template history for deployment trigger", "err", err) + return err + } + err = impl.configMapHistoryService.CreateCMCSHistoryForDeploymentTrigger(pipeline, deployedOn, deployedBy) + if err != nil { + impl.logger.Errorw("error in creating CM/CS history for deployment trigger", "err", err) + return err + } + if strategy != nil { + err = impl.pipelineStrategyHistoryService.CreateStrategyHistoryForDeploymentTrigger(strategy, deployedOn, deployedBy, pipeline.TriggerType) + if err != nil { + impl.logger.Errorw("error in creating strategy history for deployment trigger", "err", err) + return err + } 
+ } + //VARIABLE_SNAPSHOT_SAVE + if envOverride.VariableSnapshot != nil && len(envOverride.VariableSnapshot) > 0 { + variableMapBytes, _ := json.Marshal(envOverride.VariableSnapshot) + variableSnapshotHistory := &repository6.VariableSnapshotHistoryBean{ + VariableSnapshot: variableMapBytes, + HistoryReference: repository6.HistoryReference{ + HistoryReferenceId: deploymentTemplateHistory.Id, + HistoryReferenceType: repository6.HistoryReferenceTypeDeploymentTemplate, + }, + } + err = impl.variableSnapshotHistoryService.SaveVariableHistoriesForTrigger([]*repository6.VariableSnapshotHistoryBean{variableSnapshotHistory}, deployedBy) + if err != nil { + return err + } + } + return nil +} + +func (impl *AppServiceImpl) updatePipeline(pipeline *pipelineConfig.Pipeline, userId int32) (bool, error) { + err := impl.pipelineRepository.SetDeploymentAppCreatedInPipeline(true, pipeline.Id, userId) + if err != nil { + impl.logger.Errorw("error on updating cd pipeline for setting deployment app created", "err", err) + return false, err + } + return true, nil +} + +func (impl *AppServiceImpl) createHelmAppForCdPipeline(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *ValuesOverrideResponse, triggeredAt time.Time, ctx context.Context) (bool, error) { + + pipeline := valuesOverrideResponse.Pipeline + envOverride := valuesOverrideResponse.EnvOverride + mergeAndSave := valuesOverrideResponse.MergedValues + + chartMetaData := &chart2.Metadata{ + Name: pipeline.App.AppName, + Version: envOverride.Chart.ChartVersion, + } + referenceTemplatePath := path.Join(string(impl.refChartDir), envOverride.Chart.ReferenceTemplate) + + if IsHelmApp(pipeline.DeploymentAppType) { + referenceChartByte := envOverride.Chart.ReferenceChart + // here updating reference chart into database. 
+ if len(envOverride.Chart.ReferenceChart) == 0 { + refChartByte, err := impl.chartTemplateService.GetByteArrayRefChart(chartMetaData, referenceTemplatePath) + if err != nil { + impl.logger.Errorw("ref chart commit error on cd trigger", "err", err, "req", overrideRequest) + return false, err + } + ch := envOverride.Chart + ch.ReferenceChart = refChartByte + ch.UpdatedOn = time.Now() + ch.UpdatedBy = overrideRequest.UserId + err = impl.chartRepository.Update(ch) + if err != nil { + impl.logger.Errorw("chart update error", "err", err, "req", overrideRequest) + return false, err + } + referenceChartByte = refChartByte + } + + releaseName := pipeline.DeploymentAppName + cluster := envOverride.Environment.Cluster + bearerToken := cluster.Config[k8s2.BearerToken] + clusterConfig := &client2.ClusterConfig{ + ClusterName: cluster.ClusterName, + Token: bearerToken, + ApiServerUrl: cluster.ServerUrl, + InsecureSkipTLSVerify: cluster.InsecureSkipTlsVerify, + } + if cluster.InsecureSkipTlsVerify == false { + clusterConfig.KeyData = cluster.Config[k8s2.TlsKey] + clusterConfig.CertData = cluster.Config[k8s2.CertData] + clusterConfig.CaData = cluster.Config[k8s2.CertificateAuthorityData] + } + releaseIdentifier := &client2.ReleaseIdentifier{ + ReleaseName: releaseName, + ReleaseNamespace: envOverride.Namespace, + ClusterConfig: clusterConfig, + } + + if pipeline.DeploymentAppCreated { + req := &client2.UpgradeReleaseRequest{ + ReleaseIdentifier: releaseIdentifier, + ValuesYaml: mergeAndSave, + HistoryMax: impl.helmAppService.GetRevisionHistoryMaxValue(client2.SOURCE_DEVTRON_APP), + ChartContent: &client2.ChartContent{Content: referenceChartByte}, + } + + updateApplicationResponse, err := impl.helmAppClient.UpdateApplication(ctx, req) + + // For cases where helm release was not found but db flag for deployment app created was true + if err != nil && strings.Contains(err.Error(), "release: not found") { + + // retry install + _, err = impl.helmInstallReleaseWithCustomChart(ctx, 
releaseIdentifier, referenceChartByte, mergeAndSave) + + // if retry failed, return + if err != nil { + impl.logger.Errorw("release not found, failed to re-install helm application", "err", err) + return false, err + } + } else if err != nil { + impl.logger.Errorw("error in updating helm application for cd pipeline", "err", err) + return false, err + } else { + impl.logger.Debugw("updated helm application", "response", updateApplicationResponse, "isSuccess", updateApplicationResponse.Success) + } + + } else { + + helmResponse, err := impl.helmInstallReleaseWithCustomChart(ctx, releaseIdentifier, referenceChartByte, mergeAndSave) + + // For connection related errors, no need to update the db + if err != nil && strings.Contains(err.Error(), "connection error") { + impl.logger.Errorw("error in helm install custom chart", "err", err) + return false, err + } + + // IMP: update cd pipeline to mark deployment app created, even if helm install fails + // If the helm install fails, it still creates the app in failed state, so trying to + // re-create the app results in error from helm that cannot re-use name which is still in use + _, pgErr := impl.updatePipeline(pipeline, overrideRequest.UserId) + + if err != nil { + impl.logger.Errorw("error in helm install custom chart", "err", err) + + if pgErr != nil { + impl.logger.Errorw("failed to update deployment app created flag in pipeline table", "err", err) + } + return false, err + } + + if pgErr != nil { + impl.logger.Errorw("failed to update deployment app created flag in pipeline table", "err", err) + return false, err + } + + impl.logger.Debugw("received helm release response", "helmResponse", helmResponse, "isSuccess", helmResponse.Success) + } + + //update workflow runner status, used in app workflow view + cdWf, err := impl.cdWorkflowRepository.FindByWorkflowIdAndRunnerType(ctx, overrideRequest.CdWorkflowId, bean.CD_WORKFLOW_TYPE_DEPLOY) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("err on fetching cd 
workflow", "err", err) + return false, err + } + cdWorkflowId := cdWf.CdWorkflowId + if cdWf.CdWorkflowId == 0 { + cdWf := &pipelineConfig.CdWorkflow{ + CiArtifactId: overrideRequest.CiArtifactId, + PipelineId: overrideRequest.PipelineId, + AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, + } + err := impl.cdWorkflowRepository.SaveWorkFlow(ctx, cdWf) + if err != nil { + impl.logger.Errorw("err on updating cd workflow for status update", "err", err) + return false, err + } + cdWorkflowId = cdWf.Id + runner := &pipelineConfig.CdWorkflowRunner{ + Id: cdWf.Id, + Name: pipeline.Name, + WorkflowType: bean.CD_WORKFLOW_TYPE_DEPLOY, + ExecutorType: pipelineConfig.WORKFLOW_EXECUTOR_TYPE_AWF, + Status: pipelineConfig.WorkflowInProgress, + TriggeredBy: overrideRequest.UserId, + StartedOn: triggeredAt, + CdWorkflowId: cdWorkflowId, + AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, + } + _, err = impl.cdWorkflowRepository.SaveWorkFlowRunner(runner) + if err != nil { + impl.logger.Errorw("err on updating cd workflow runner for status update", "err", err) + return false, err + } + } else { + cdWf.Status = pipelineConfig.WorkflowInProgress + cdWf.FinishedOn = time.Now() + cdWf.UpdatedBy = overrideRequest.UserId + cdWf.UpdatedOn = time.Now() + err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(&cdWf) + if err != nil { + impl.logger.Errorw("error on update cd workflow runner", "cdWf", cdWf, "err", err) + return false, err + } + } + } + return true, nil +} + +// helmInstallReleaseWithCustomChart performs helm install with custom chart +func (impl *AppServiceImpl) helmInstallReleaseWithCustomChart(ctx context.Context, releaseIdentifier *client2.ReleaseIdentifier, referenceChartByte []byte, valuesYaml string) (*client2.HelmInstallCustomResponse, error) { + + helmInstallRequest := 
client2.HelmInstallCustomRequest{ + ValuesYaml: valuesYaml, + ChartContent: &client2.ChartContent{Content: referenceChartByte}, + ReleaseIdentifier: releaseIdentifier, + } + + // Request exec + return impl.helmAppClient.InstallReleaseWithCustomChart(ctx, &helmInstallRequest) +} + func (impl *AppServiceImpl) GetGitOpsRepoPrefix() string { return impl.globalEnvVariables.GitOpsRepoPrefix } diff --git a/pkg/appClone/AppCloneService.go b/pkg/appClone/AppCloneService.go index 73b8996f61..b7dd35f1b6 100644 --- a/pkg/appClone/AppCloneService.go +++ b/pkg/appClone/AppCloneService.go @@ -53,9 +53,6 @@ type AppCloneServiceImpl struct { pipelineStageService pipeline.PipelineStageService ciTemplateService pipeline.CiTemplateService appRepository app2.AppRepository - ciPipelineRepository pipelineConfig.CiPipelineRepository - pipelineRepository pipelineConfig.PipelineRepository - appWorkflowRepository appWorkflow2.AppWorkflowRepository } func NewAppCloneServiceImpl(logger *zap.SugaredLogger, @@ -68,8 +65,7 @@ func NewAppCloneServiceImpl(logger *zap.SugaredLogger, propertiesConfigService pipeline.PropertiesConfigService, ciTemplateOverrideRepository pipelineConfig.CiTemplateOverrideRepository, pipelineStageService pipeline.PipelineStageService, ciTemplateService pipeline.CiTemplateService, - appRepository app2.AppRepository, ciPipelineRepository pipelineConfig.CiPipelineRepository, - pipelineRepository pipelineConfig.PipelineRepository, appWorkflowRepository appWorkflow2.AppWorkflowRepository) *AppCloneServiceImpl { + appRepository app2.AppRepository) *AppCloneServiceImpl { return &AppCloneServiceImpl{ logger: logger, pipelineBuilder: pipelineBuilder, @@ -82,9 +78,6 @@ func NewAppCloneServiceImpl(logger *zap.SugaredLogger, pipelineStageService: pipelineStageService, ciTemplateService: ciTemplateService, appRepository: appRepository, - ciPipelineRepository: ciPipelineRepository, - pipelineRepository: pipelineRepository, - appWorkflowRepository: appWorkflowRepository, } } @@ -97,15 
+90,6 @@ type CloneRequest struct { AppType helper.AppType `json:"appType"` } -type CreateWorkflowMappingDto struct { - oldAppId int - newAppId int - userId int32 - newWfId int - gitMaterialMapping map[int]int - externalCiPipelineId int -} - func (impl *AppCloneServiceImpl) CloneApp(createReq *bean.CreateAppDTO, context context.Context) (*bean.CreateAppDTO, error) { //validate template app templateApp, err := impl.appRepository.FindById(createReq.TemplateId) @@ -138,7 +122,18 @@ func (impl *AppCloneServiceImpl) CloneApp(createReq *bean.CreateAppDTO, context if err != nil { return nil, err } - + refApp, err := impl.pipelineBuilder.GetApp(cloneReq.RefAppId) + if err != nil { + return nil, err + } + isSameProject := refApp.TeamId == cloneReq.ProjectId + /* appStageStatus = append(appStageStatus, impl.makeAppStageStatus(0, "APP", stages.AppId)) + appStageStatus = append(appStageStatus, impl.makeAppStageStatus(1, "MATERIAL", materialExists)) + appStageStatus = append(appStageStatus, impl.makeAppStageStatus(2, "TEMPLATE", stages.CiTemplateId)) + appStageStatus = append(appStageStatus, impl.makeAppStageStatus(3, "CI_PIPELINE", stages.CiPipelineId)) + appStageStatus = append(appStageStatus, impl.makeAppStageStatus(4, "CHART", stages.ChartId)) + appStageStatus = append(appStageStatus, impl.makeAppStageStatus(5, "CD_PIPELINE", stages.PipelineId)) + */ refAppStatus := make(map[string]bool) for _, as := range appStatus { refAppStatus[as.StageName] = as.Status @@ -198,31 +193,32 @@ func (impl *AppCloneServiceImpl) CloneApp(createReq *bean.CreateAppDTO, context impl.logger.Errorw("error in creating global secret", "ref", cloneReq.RefAppId, "new", newAppId, "err", err) return nil, err } - - if createReq.AppType != helper.Job { - _, err = impl.CreateEnvCm(context, cloneReq.RefAppId, newAppId, userId) - if err != nil { - impl.logger.Errorw("error in creating env cm", "err", err) - return nil, err - } - _, err = impl.CreateEnvSecret(context, cloneReq.RefAppId, newAppId, userId) - if 
err != nil { - impl.logger.Errorw("error in creating env secret", "err", err) - return nil, err - } - _, err = impl.createEnvOverride(cloneReq.RefAppId, newAppId, userId, context) - if err != nil { - impl.logger.Errorw("error in cloning env override", "err", err) - return nil, err - } - } else { - _, err := impl.configMapService.ConfigSecretEnvironmentClone(cloneReq.RefAppId, newAppId, userId) - if err != nil { - impl.logger.Errorw("error in cloning cm cs env override", "err", err) - return nil, err + if isSameProject { + if createReq.AppType != helper.Job { + _, err = impl.CreateEnvCm(context, cloneReq.RefAppId, newAppId, userId) + if err != nil { + impl.logger.Errorw("error in creating env cm", "err", err) + return nil, err + } + _, err = impl.CreateEnvSecret(context, cloneReq.RefAppId, newAppId, userId) + if err != nil { + impl.logger.Errorw("error in creating env secret", "err", err) + return nil, err + } + _, err = impl.createEnvOverride(cloneReq.RefAppId, newAppId, userId, context) + if err != nil { + impl.logger.Errorw("error in cloning env override", "err", err) + return nil, err + } + } else { + _, err := impl.configMapService.ConfigSecretEnvironmentClone(cloneReq.RefAppId, newAppId, userId) + if err != nil { + impl.logger.Errorw("error in cloning cm cs env override", "err", err) + return nil, err + } } } - _, err = impl.CreateWf(cloneReq.RefAppId, newAppId, userId, gitMaerialMap, context) + _, err = impl.CreateWf(cloneReq.RefAppId, newAppId, userId, gitMaerialMap, context, isSameProject) if err != nil { impl.logger.Errorw("error in creating wf", "ref", cloneReq.RefAppId, "new", newAppId, "err", err) return nil, err @@ -316,7 +312,6 @@ func (impl *AppCloneServiceImpl) CreateCiTemplate(oldAppId, newAppId int, userId UserId: userId, BeforeDockerBuild: refCiConf.BeforeDockerBuild, AfterDockerBuild: refCiConf.AfterDockerBuild, - ScanEnabled: refCiConf.ScanEnabled, } res, err := impl.pipelineBuilder.CreateCiPipeline(ciConfRequest) @@ -585,13 +580,12 @@ func 
(impl *AppCloneServiceImpl) CreateGlobalSecret(oldAppId, newAppId int, user return thisCm, err } -func (impl *AppCloneServiceImpl) CreateWf(oldAppId, newAppId int, userId int32, gitMaterialMapping map[int]int, ctx context.Context) (interface{}, error) { +func (impl *AppCloneServiceImpl) CreateWf(oldAppId, newAppId int, userId int32, gitMaterialMapping map[int]int, ctx context.Context, isSameProject bool) (interface{}, error) { refAppWFs, err := impl.appWorkflowService.FindAppWorkflows(oldAppId) if err != nil { return nil, err } impl.logger.Debugw("workflow found", "wf", refAppWFs) - for _, refAppWF := range refAppWFs { thisWf := appWorkflow.AppWorkflowDto{ Id: 0, @@ -600,37 +594,24 @@ func (impl *AppCloneServiceImpl) CreateWf(oldAppId, newAppId int, userId int32, AppWorkflowMappingDto: nil, //first create new mapping then add it UserId: userId, } - thisWf, err = impl.appWorkflowService.CreateAppWorkflow(thisWf) - if err != nil { - impl.logger.Errorw("error in creating workflow without external-ci", "err", err) - return nil, err - } isExternalCiPresent := false for _, awm := range refAppWF.AppWorkflowMappingDto { if awm.Type == appWorkflow2.WEBHOOK { isExternalCiPresent = true - break } } - createWorkflowMappingDto := CreateWorkflowMappingDto{ - newAppId: newAppId, - oldAppId: oldAppId, - newWfId: thisWf.Id, - userId: userId, - } - var externalCiPipelineId int - if isExternalCiPresent { - externalCiPipelineId, err = impl.createExternalCiAndAppWorkflowMapping(createWorkflowMappingDto) + + if !isExternalCiPresent { + thisWf, err = impl.appWorkflowService.CreateAppWorkflow(thisWf) + impl.logger.Debugw("workflow found", thisWf) if err != nil { - impl.logger.Errorw("error in createExternalCiAndAppWorkflowMapping", "err", err) + impl.logger.Errorw("errir in creating workflow without extenal-ci", "err", err) return nil, err } } - createWorkflowMappingDto.gitMaterialMapping = gitMaterialMapping - createWorkflowMappingDto.externalCiPipelineId = externalCiPipelineId - err = 
impl.createWfInstances(refAppWF.AppWorkflowMappingDto, createWorkflowMappingDto, ctx) + err = impl.createWfMappings(refAppWF.AppWorkflowMappingDto, oldAppId, newAppId, userId, thisWf.Id, gitMaterialMapping, ctx, isSameProject) if err != nil { impl.logger.Errorw("error in creating workflow mapping", "err", err) return nil, err @@ -639,28 +620,7 @@ func (impl *AppCloneServiceImpl) CreateWf(oldAppId, newAppId int, userId int32, return nil, nil } -func (impl *AppCloneServiceImpl) createExternalCiAndAppWorkflowMapping(createWorkflowMappingDto CreateWorkflowMappingDto) (int, error) { - dbConnection := impl.pipelineRepository.GetConnection() - tx, err := dbConnection.Begin() - if err != nil { - impl.logger.Errorw("error in beginning transaction", "err", err) - return 0, err - } - // Rollback tx on error. - defer tx.Rollback() - externalCiPipelineId, err := impl.pipelineBuilder.CreateExternalCiAndAppWorkflowMapping(createWorkflowMappingDto.newAppId, createWorkflowMappingDto.newWfId, createWorkflowMappingDto.userId, tx) - if err != nil { - impl.logger.Errorw("error in creating new external ci pipeline and new app workflow mapping", "refAppId", createWorkflowMappingDto.oldAppId, "newAppId", createWorkflowMappingDto.newAppId, "err", err) - return 0, err - } - err = tx.Commit() - if err != nil { - return 0, err - } - return externalCiPipelineId, nil -} - -func (impl *AppCloneServiceImpl) createWfInstances(refWfMappings []appWorkflow.AppWorkflowMappingDto, createWorkflowMappingDto CreateWorkflowMappingDto, ctx context.Context) error { +func (impl *AppCloneServiceImpl) createWfMappings(refWfMappings []appWorkflow.AppWorkflowMappingDto, oldAppId, newAppId int, userId int32, thisWfId int, gitMaterialMapping map[int]int, ctx context.Context, isSameProject bool) error { impl.logger.Debugw("wf mapping cloning", "refWfMappings", refWfMappings) var ciMapping []appWorkflow.AppWorkflowMappingDto var cdMappings []appWorkflow.AppWorkflowMappingDto @@ -676,31 +636,28 @@ func (impl 
*AppCloneServiceImpl) createWfInstances(refWfMappings []appWorkflow.A return fmt.Errorf("unsupported wf type: %s", appWf.Type) } } - sourceToNewPipelineIdMapping := make(map[int]int) - refApp, err := impl.pipelineBuilder.GetApp(createWorkflowMappingDto.oldAppId) - if err != nil { - impl.logger.Errorw("error in getting app from refAppId", "refAppId", createWorkflowMappingDto.oldAppId) - return err - } + refApp, err := impl.pipelineBuilder.GetApp(oldAppId) if len(webhookMappings) > 0 { - for _, refwebhookMappings := range cdMappings { - cdCloneReq := &cloneCdPipelineRequest{ - refCdPipelineId: refwebhookMappings.ComponentId, - refAppId: createWorkflowMappingDto.oldAppId, - appId: createWorkflowMappingDto.newAppId, - userId: createWorkflowMappingDto.userId, - ciPipelineId: 0, - appWfId: createWorkflowMappingDto.newWfId, - refAppName: refApp.AppName, - sourceToNewPipelineId: sourceToNewPipelineIdMapping, - externalCiPipelineId: createWorkflowMappingDto.externalCiPipelineId, - } - pipeline, err := impl.CreateCdPipeline(cdCloneReq, ctx) - impl.logger.Debugw("cd pipeline created", "pipeline", pipeline) - if err != nil { - impl.logger.Errorw("error in getting cd-pipeline", "refAppId", createWorkflowMappingDto.oldAppId, "newAppId", createWorkflowMappingDto.newAppId, "err", err) - return err + if isSameProject { + for _, refwebhookMappings := range cdMappings { + cdCloneReq := &cloneCdPipelineRequest{ + refCdPipelineId: refwebhookMappings.ComponentId, + refAppId: oldAppId, + appId: newAppId, + userId: userId, + ciPipelineId: 0, + appWfId: thisWfId, + refAppName: refApp.AppName, + } + pipeline, err := impl.CreateCdPipeline(cdCloneReq, ctx) + impl.logger.Debugw("cd pipeline created", "pipeline", pipeline) + if err != nil { + impl.logger.Errorw("error in getting cd-pipeling", "err", err) + return err + } } + } else { + impl.logger.Debug("not the same project, skipping cd pipeline creation") } return nil } @@ -709,7 +666,7 @@ func (impl *AppCloneServiceImpl) 
createWfInstances(refWfMappings []appWorkflow.A impl.logger.Warn("no ci pipeline found") return nil } else if len(ciMapping) != 1 { - impl.logger.Warn("more than one ci pipeline not supported") + impl.logger.Warn("more than one cd pipeline not supported") return nil } @@ -721,12 +678,12 @@ func (impl *AppCloneServiceImpl) createWfInstances(refWfMappings []appWorkflow.A impl.logger.Debugw("creating ci", "ref", refCiMapping) cloneCiPipelineRequest := &cloneCiPipelineRequest{ - refAppId: createWorkflowMappingDto.oldAppId, + refAppId: oldAppId, refCiPipelineId: refCiMapping.ComponentId, - userId: createWorkflowMappingDto.userId, - appId: createWorkflowMappingDto.newAppId, - wfId: createWorkflowMappingDto.newWfId, - gitMaterialMapping: createWorkflowMappingDto.gitMaterialMapping, + userId: userId, + appId: newAppId, + wfId: thisWfId, + gitMaterialMapping: gitMaterialMapping, refAppName: refApp.AppName, } ci, err = impl.CreateCiPipeline(cloneCiPipelineRequest) @@ -736,24 +693,26 @@ func (impl *AppCloneServiceImpl) createWfInstances(refWfMappings []appWorkflow.A } impl.logger.Debugw("ci created", "ci", ci) } - - for _, refCdMapping := range cdMappings { - cdCloneReq := &cloneCdPipelineRequest{ - refCdPipelineId: refCdMapping.ComponentId, - refAppId: createWorkflowMappingDto.oldAppId, - appId: createWorkflowMappingDto.newAppId, - userId: createWorkflowMappingDto.userId, - ciPipelineId: ci.CiPipelines[0].Id, - appWfId: createWorkflowMappingDto.newWfId, - refAppName: refApp.AppName, - sourceToNewPipelineId: sourceToNewPipelineIdMapping, - } - pipeline, err := impl.CreateCdPipeline(cdCloneReq, ctx) - if err != nil { - impl.logger.Errorw("error in creating cd pipeline, app clone", "err", err) - return err + if isSameProject { + for _, refCdMapping := range cdMappings { + cdCloneReq := &cloneCdPipelineRequest{ + refCdPipelineId: refCdMapping.ComponentId, + refAppId: oldAppId, + appId: newAppId, + userId: userId, + ciPipelineId: ci.CiPipelines[0].Id, + appWfId: thisWfId, + 
refAppName: refApp.AppName, + } + pipeline, err := impl.CreateCdPipeline(cdCloneReq, ctx) + if err != nil { + impl.logger.Errorw("error in creating cd pipeline, app clone", "err", err) + return err + } + impl.logger.Debugw("cd pipeline created", "pipeline", pipeline) } - impl.logger.Debugw("cd pipeline created", "pipeline", pipeline) + } else { + impl.logger.Debug("not the same project, skipping cd pipeline creation") } //find ci @@ -854,7 +813,6 @@ func (impl *AppCloneServiceImpl) CreateCiPipeline(req *cloneCiPipelineRequest) ( PreBuildStage: preStageDetail, PostBuildStage: postStageDetail, EnvironmentId: refCiPipeline.EnvironmentId, - ScanEnabled: refCiPipeline.ScanEnabled, PipelineType: refCiPipeline.PipelineType, }, AppId: req.appId, @@ -909,15 +867,13 @@ func (impl *AppCloneServiceImpl) CreateCiPipeline(req *cloneCiPipelineRequest) ( } type cloneCdPipelineRequest struct { - refCdPipelineId int - refAppId int - appId int - userId int32 - ciPipelineId int - appWfId int - refAppName string - sourceToNewPipelineId map[int]int - externalCiPipelineId int + refCdPipelineId int + refAppId int + appId int + userId int32 + ciPipelineId int + appWfId int + refAppName string } func (impl *AppCloneServiceImpl) CreateCdPipeline(req *cloneCdPipelineRequest, ctx context.Context) (*bean.CdPipelines, error) { @@ -935,7 +891,6 @@ func (impl *AppCloneServiceImpl) CreateCdPipeline(req *cloneCdPipelineRequest, c if refCdPipeline == nil { return nil, fmt.Errorf("no cd pipeline found") } - refCdPipeline.SourceToNewPipelineId = req.sourceToNewPipelineId pipelineName := refCdPipeline.Name if strings.HasPrefix(pipelineName, req.refAppName) { pipelineName = strings.Replace(pipelineName, req.refAppName+"-", "", 1) @@ -966,6 +921,35 @@ func (impl *AppCloneServiceImpl) CreateCdPipeline(req *cloneCdPipelineRequest, c deploymentAppType = util.PIPELINE_DEPLOYMENT_TYPE_HELM } + if refCdPipeline.ParentPipelineType == "WEBHOOK" { + cdPipeline := &bean.CDPipelineConfigObject{ + Id: 0, + 
EnvironmentId: refCdPipeline.EnvironmentId, + CiPipelineId: 0, + TriggerType: refCdPipeline.TriggerType, + Name: pipelineName, + Strategies: refCdPipeline.Strategies, + Namespace: refCdPipeline.Namespace, + AppWorkflowId: 0, + DeploymentTemplate: refCdPipeline.DeploymentTemplate, + PreStage: refCdPipeline.PreStage, //FIXME + PostStage: refCdPipeline.PostStage, + PreStageConfigMapSecretNames: refCdPipeline.PreStageConfigMapSecretNames, + PostStageConfigMapSecretNames: refCdPipeline.PostStageConfigMapSecretNames, + RunPostStageInEnv: refCdPipeline.RunPostStageInEnv, + RunPreStageInEnv: refCdPipeline.RunPreStageInEnv, + DeploymentAppType: refCdPipeline.DeploymentAppType, + ParentPipelineId: 0, + ParentPipelineType: refCdPipeline.ParentPipelineType, + } + cdPipelineReq := &bean.CdPipelines{ + Pipelines: []*bean.CDPipelineConfigObject{cdPipeline}, + AppId: req.appId, + UserId: req.userId, + } + cdPipelineRes, err := impl.pipelineBuilder.CreateCdPipelines(cdPipelineReq, ctx) + return cdPipelineRes, err + } cdPipeline := &bean.CDPipelineConfigObject{ Id: 0, EnvironmentId: refCdPipeline.EnvironmentId, @@ -985,15 +969,6 @@ func (impl *AppCloneServiceImpl) CreateCdPipeline(req *cloneCdPipelineRequest, c DeploymentAppType: deploymentAppType, PreDeployStage: refCdPipeline.PreDeployStage, PostDeployStage: refCdPipeline.PostDeployStage, - SourceToNewPipelineId: refCdPipeline.SourceToNewPipelineId, - RefPipelineId: refCdPipeline.Id, - ParentPipelineType: refCdPipeline.ParentPipelineType, - } - if refCdPipeline.ParentPipelineType == "WEBHOOK" { - cdPipeline.CiPipelineId = 0 - cdPipeline.ParentPipelineId = req.externalCiPipelineId - } else if refCdPipeline.ParentPipelineType != appWorkflow.CI_PIPELINE_TYPE { - cdPipeline.ParentPipelineId = refCdPipeline.ParentPipelineId } cdPipelineReq := &bean.CdPipelines{ Pipelines: []*bean.CDPipelineConfigObject{cdPipeline}, diff --git a/pkg/appStore/deployment/fullMode/AppStoreDeploymentFullModeService.go 
b/pkg/appStore/deployment/fullMode/AppStoreDeploymentFullModeService.go index 3673437ef2..8aa2e56db9 100644 --- a/pkg/appStore/deployment/fullMode/AppStoreDeploymentFullModeService.go +++ b/pkg/appStore/deployment/fullMode/AppStoreDeploymentFullModeService.go @@ -86,7 +86,6 @@ type AppStoreDeploymentFullModeServiceImpl struct { gitOpsConfigRepository repository3.GitOpsConfigRepository pipelineStatusTimelineService status.PipelineStatusTimelineService appStoreDeploymentCommonService appStoreDeploymentCommon.AppStoreDeploymentCommonService - argoClientWrapperService argocdServer.ArgoClientWrapperService } func NewAppStoreDeploymentFullModeServiceImpl(logger *zap.SugaredLogger, @@ -102,7 +101,6 @@ func NewAppStoreDeploymentFullModeServiceImpl(logger *zap.SugaredLogger, argoUserService argo.ArgoUserService, gitOpsConfigRepository repository3.GitOpsConfigRepository, pipelineStatusTimelineService status.PipelineStatusTimelineService, appStoreDeploymentCommonService appStoreDeploymentCommon.AppStoreDeploymentCommonService, - argoClientWrapperService argocdServer.ArgoClientWrapperService, ) *AppStoreDeploymentFullModeServiceImpl { return &AppStoreDeploymentFullModeServiceImpl{ logger: logger, @@ -122,7 +120,6 @@ func NewAppStoreDeploymentFullModeServiceImpl(logger *zap.SugaredLogger, gitOpsConfigRepository: gitOpsConfigRepository, pipelineStatusTimelineService: pipelineStatusTimelineService, appStoreDeploymentCommonService: appStoreDeploymentCommonService, - argoClientWrapperService: argoClientWrapperService, } } @@ -319,12 +316,6 @@ func (impl AppStoreDeploymentFullModeServiceImpl) AppStoreDeployOperationACD(ins //STEP 6: Force Sync ACD - works like trigger deployment //impl.SyncACD(installAppVersionRequest.ACDAppName, ctx) - //STEP 7: normal refresh ACD - update for step 6 to avoid delay - err = impl.argoClientWrapperService.GetArgoAppWithNormalRefresh(ctx, installAppVersionRequest.ACDAppName) - if err != nil { - impl.logger.Errorw("error in getting the argo application 
with normal refresh", "err", err) - } - return installAppVersionRequest, nil } diff --git a/pkg/appStore/deployment/service/AppStoreDeploymentService.go b/pkg/appStore/deployment/service/AppStoreDeploymentService.go index 8c73baf6a9..99b0788d93 100644 --- a/pkg/appStore/deployment/service/AppStoreDeploymentService.go +++ b/pkg/appStore/deployment/service/AppStoreDeploymentService.go @@ -91,7 +91,6 @@ func GetDeploymentServiceTypeConfig() (*DeploymentServiceTypeConfig, error) { type AppStoreDeploymentServiceImpl struct { logger *zap.SugaredLogger installedAppRepository repository.InstalledAppRepository - chartGroupDeploymentRepository repository.ChartGroupDeploymentRepository appStoreApplicationVersionRepository appStoreDiscoverRepository.AppStoreApplicationVersionRepository environmentRepository clusterRepository.EnvironmentRepository clusterInstalledAppsRepository repository.ClusterInstalledAppsRepository @@ -111,7 +110,7 @@ type AppStoreDeploymentServiceImpl struct { } func NewAppStoreDeploymentServiceImpl(logger *zap.SugaredLogger, installedAppRepository repository.InstalledAppRepository, - chartGroupDeploymentRepository repository.ChartGroupDeploymentRepository, appStoreApplicationVersionRepository appStoreDiscoverRepository.AppStoreApplicationVersionRepository, environmentRepository clusterRepository.EnvironmentRepository, + appStoreApplicationVersionRepository appStoreDiscoverRepository.AppStoreApplicationVersionRepository, environmentRepository clusterRepository.EnvironmentRepository, clusterInstalledAppsRepository repository.ClusterInstalledAppsRepository, appRepository app.AppRepository, appStoreDeploymentHelmService appStoreDeploymentTool.AppStoreDeploymentHelmService, appStoreDeploymentArgoCdService appStoreDeploymentGitopsTool.AppStoreDeploymentArgoCdService, environmentService cluster.EnvironmentService, @@ -123,7 +122,6 @@ func NewAppStoreDeploymentServiceImpl(logger *zap.SugaredLogger, installedAppRep appStoreDeploymentServiceImpl := 
&AppStoreDeploymentServiceImpl{ logger: logger, installedAppRepository: installedAppRepository, - chartGroupDeploymentRepository: chartGroupDeploymentRepository, appStoreApplicationVersionRepository: appStoreApplicationVersionRepository, environmentRepository: environmentRepository, clusterInstalledAppsRepository: clusterInstalledAppsRepository, @@ -767,21 +765,6 @@ func (impl AppStoreDeploymentServiceImpl) DeleteInstalledApp(ctx context.Context } } - // soft delete chart-group deployment - chartGroupDeployment, err := impl.chartGroupDeploymentRepository.FindByInstalledAppId(model.Id) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error while fetching chart group deployment", "error", err) - return nil, err - } - if chartGroupDeployment.Id != 0 { - chartGroupDeployment.Deleted = true - _, err = impl.chartGroupDeploymentRepository.Update(chartGroupDeployment, tx) - if err != nil { - impl.logger.Errorw("error while updating chart group deployment", "error", err) - return nil, err - } - } - if util2.IsBaseStack() || util2.IsHelmApp(app.AppOfferingMode) || util.IsHelmApp(model.DeploymentAppType) { // there might be a case if helm release gets uninstalled from helm cli. 
//in this case on deleting the app from API, it should not give error as it should get deleted from db, otherwise due to delete error, db does not get clean @@ -1487,11 +1470,13 @@ func (impl *AppStoreDeploymentServiceImpl) UpdateInstalledApp(ctx context.Contex } if installAppVersionRequest.PerformACDDeployment { - // refresh update repo details on ArgoCD if repo is changed - err = impl.appStoreDeploymentArgoCdService.RefreshAndUpdateACDApp(installAppVersionRequest, gitOpsResponse.ChartGitAttribute, monoRepoMigrationRequired, ctx) - if err != nil { - impl.logger.Errorw("error in acd patch request", "err", err) - return nil, err + if monoRepoMigrationRequired { + // update repo details on ArgoCD as repo is changed + err = impl.appStoreDeploymentArgoCdService.UpdateChartInfo(installAppVersionRequest, gitOpsResponse.ChartGitAttribute, 0, ctx) + if err != nil { + impl.logger.Errorw("error in acd patch request", "err", err) + return nil, err + } } } else if installAppVersionRequest.PerformHelmDeployment { err = impl.appStoreDeploymentHelmService.UpdateChartInfo(installAppVersionRequest, gitOpsResponse.ChartGitAttribute, installAppVersionRequest.InstalledAppVersionHistoryId, ctx) diff --git a/pkg/appStore/deployment/tool/AppStoreDeploymentHelmService.go b/pkg/appStore/deployment/tool/AppStoreDeploymentHelmService.go index 38371338c2..f5a99a1ec7 100644 --- a/pkg/appStore/deployment/tool/AppStoreDeploymentHelmService.go +++ b/pkg/appStore/deployment/tool/AppStoreDeploymentHelmService.go @@ -473,10 +473,3 @@ func (impl *AppStoreDeploymentHelmServiceImpl) SaveTimelineForACDHelmApps(instal func (impl *AppStoreDeploymentHelmServiceImpl) UpdateInstalledAppAndPipelineStatusForFailedDeploymentStatus(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, triggeredAt time.Time, err error) error { return nil } - -// TODO: Need to refactor this,refer below reason -// This is being done as in ea mode wire argocd service is being binded to helmServiceImpl due to which we are 
restricted to implement this here. -// RefreshAndUpdateACDApp this will update chart info in acd app if required in case of mono repo migration and will refresh argo app -func (impl *AppStoreDeploymentHelmServiceImpl) RefreshAndUpdateACDApp(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, ChartGitAttribute *util.ChartGitAttribute, isMonoRepoMigrationRequired bool, ctx context.Context) error { - return errors.New("this is not implemented") -} diff --git a/pkg/appStore/deployment/tool/gitops/AppStoreDeploymentArgoCdService.go b/pkg/appStore/deployment/tool/gitops/AppStoreDeploymentArgoCdService.go index d0492eed88..9f9f9b049e 100644 --- a/pkg/appStore/deployment/tool/gitops/AppStoreDeploymentArgoCdService.go +++ b/pkg/appStore/deployment/tool/gitops/AppStoreDeploymentArgoCdService.go @@ -11,7 +11,6 @@ import ( client "github.com/devtron-labs/devtron/api/helm-app" openapi "github.com/devtron-labs/devtron/api/helm-app/openapiClient" openapi2 "github.com/devtron-labs/devtron/api/openapi/openapiClient" - "github.com/devtron-labs/devtron/client/argocdServer" application2 "github.com/devtron-labs/devtron/client/argocdServer/application" "github.com/devtron-labs/devtron/internal/constants" repository3 "github.com/devtron-labs/devtron/internal/sql/repository" @@ -56,7 +55,6 @@ type AppStoreDeploymentArgoCdService interface { UpdateInstalledAppAndPipelineStatusForFailedDeploymentStatus(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, triggeredAt time.Time, err error) error SaveTimelineForACDHelmApps(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, status string, statusDetail string, tx *pg.Tx) error UpdateChartInfo(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, ChartGitAttribute *util.ChartGitAttribute, installedAppVersionHistoryId int, ctx context.Context) error - 
RefreshAndUpdateACDApp(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, ChartGitAttribute *util.ChartGitAttribute, isMonoRepoMigrationRequired bool, ctx context.Context) error } type AppStoreDeploymentArgoCdServiceImpl struct { @@ -77,7 +75,6 @@ type AppStoreDeploymentArgoCdServiceImpl struct { userService user.UserService pipelineStatusTimelineRepository pipelineConfig.PipelineStatusTimelineRepository appStoreApplicationVersionRepository appStoreDiscoverRepository.AppStoreApplicationVersionRepository - argoClientWrapperService argocdServer.ArgoClientWrapperService } func NewAppStoreDeploymentArgoCdServiceImpl(logger *zap.SugaredLogger, appStoreDeploymentFullModeService appStoreDeploymentFullMode.AppStoreDeploymentFullModeService, @@ -88,7 +85,7 @@ func NewAppStoreDeploymentArgoCdServiceImpl(logger *zap.SugaredLogger, appStoreD pipelineStatusTimelineService status.PipelineStatusTimelineService, userService user.UserService, pipelineStatusTimelineRepository pipelineConfig.PipelineStatusTimelineRepository, appStoreApplicationVersionRepository appStoreDiscoverRepository.AppStoreApplicationVersionRepository, - argoClientWrapperService argocdServer.ArgoClientWrapperService) *AppStoreDeploymentArgoCdServiceImpl { +) *AppStoreDeploymentArgoCdServiceImpl { return &AppStoreDeploymentArgoCdServiceImpl{ Logger: logger, appStoreDeploymentFullModeService: appStoreDeploymentFullModeService, @@ -107,28 +104,9 @@ func NewAppStoreDeploymentArgoCdServiceImpl(logger *zap.SugaredLogger, appStoreD userService: userService, pipelineStatusTimelineRepository: pipelineStatusTimelineRepository, appStoreApplicationVersionRepository: appStoreApplicationVersionRepository, - argoClientWrapperService: argoClientWrapperService, } } -// RefreshAndUpdateACDApp this will update chart info in acd app if required in case of mono repo migration and will refresh argo app -func (impl AppStoreDeploymentArgoCdServiceImpl) RefreshAndUpdateACDApp(installAppVersionRequest 
*appStoreBean.InstallAppVersionDTO, ChartGitAttribute *util.ChartGitAttribute, isMonoRepoMigrationRequired bool, ctx context.Context) error { - if isMonoRepoMigrationRequired { - // update repo details on ArgoCD as repo is changed - err := impl.UpdateChartInfo(installAppVersionRequest, ChartGitAttribute, 0, ctx) - if err != nil { - impl.Logger.Errorw("error in acd patch request", "err", err) - return err - } - } - // Doing this to refresh normally by getting app to avoid sync delay argo cd - err := impl.argoClientWrapperService.GetArgoAppWithNormalRefresh(ctx, installAppVersionRequest.ACDAppName) - if err != nil { - impl.Logger.Errorw("error in getting argocd application with normal refresh", "err", err, "argoAppName", installAppVersionRequest.ACDAppName) - } - return nil -} - // UpdateChartInfo this will update chart info in acd app, needed when repo for an app is changed func (impl AppStoreDeploymentArgoCdServiceImpl) UpdateChartInfo(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, ChartGitAttribute *util.ChartGitAttribute, installedAppVersionHistoryId int, ctx context.Context) error { installAppVersionRequest, err := impl.patchAcdApp(ctx, installAppVersionRequest, ChartGitAttribute) diff --git a/pkg/auth/UserAuthOidcHelper.go b/pkg/auth/UserAuthOidcHelper.go index 829f905c1a..eefb93e2b1 100644 --- a/pkg/auth/UserAuthOidcHelper.go +++ b/pkg/auth/UserAuthOidcHelper.go @@ -21,7 +21,7 @@ import ( "github.com/devtron-labs/authenticator/client" authMiddleware "github.com/devtron-labs/authenticator/middleware" "github.com/devtron-labs/authenticator/oidc" - "github.com/devtron-labs/devtron/client/argocdServer/connection" + "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/pkg/user" "go.uber.org/zap" "net/http" @@ -66,8 +66,8 @@ func NewUserAuthOidcHelperImpl(logger *zap.SugaredLogger, selfRegistrationRolesS // 
SanitiseRedirectUrl replaces initial "/orchestrator" from url func (impl UserAuthOidcHelperImpl) sanitiseRedirectUrl(redirectUrl string) string { - if strings.Contains(redirectUrl, connection.Dashboard) { - redirectUrl = strings.ReplaceAll(redirectUrl, connection.Orchestrator, "") + if strings.Contains(redirectUrl, argocdServer.Dashboard) { + redirectUrl = strings.ReplaceAll(redirectUrl, argocdServer.Orchestrator, "") } return redirectUrl } diff --git a/pkg/bean/app.go b/pkg/bean/app.go index 8743917f01..6fd01e4b6c 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -560,9 +560,6 @@ type CDPipelineConfigObject struct { ManifestStorageType string `json:"manifestStorageType"` PreDeployStage *bean.PipelineStageDto `json:"preDeployStage,omitempty"` PostDeployStage *bean.PipelineStageDto `json:"postDeployStage,omitempty"` - SourceToNewPipelineId map[int]int `json:"sourceToNewPipelineId,omitempty"` - RefPipelineId int `json:"refPipelineId,omitempty"` - ExternalCiPipelineId int `json:"externalCiPipelineId,omitempty"` CustomTagObject *CustomTagData `json:"customTag,omitempty"` CustomTagStage *repository.PipelineStageType `json:"customTagStage,omitempty"` } diff --git a/pkg/chart/ChartService.go b/pkg/chart/ChartService.go index 9be1e4872f..cf426dd8c2 100644 --- a/pkg/chart/ChartService.go +++ b/pkg/chart/ChartService.go @@ -24,7 +24,6 @@ import ( "fmt" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/variables" - models2 "github.com/devtron-labs/devtron/pkg/variables/models" "github.com/devtron-labs/devtron/pkg/variables/parsers" repository5 "github.com/devtron-labs/devtron/pkg/variables/repository" @@ -101,7 +100,7 @@ type ChartService interface { FlaggerCanaryEnabled(values json.RawMessage) (bool, error) GetCustomChartInBytes(chatRefId int) ([]byte, error) GetRefChart(templateRequest TemplateRequest) (string, string, error, string, string) - 
ExtractVariablesAndResolveTemplate(scope resourceQualifiers.Scope, template string, templateType parsers.VariableTemplateType, isSuperAdmin bool, maskUnknownVariable bool) (string, map[string]string, error) + ExtractVariablesAndResolveTemplate(scope resourceQualifiers.Scope, template string, templateType parsers.VariableTemplateType) (string, error) } type ChartServiceImpl struct { @@ -1333,46 +1332,30 @@ const cpuPattern = `"50m" or "0.05"` const cpu = "cpu" const memory = "memory" -func (impl ChartServiceImpl) ExtractVariablesAndResolveTemplate(scope resourceQualifiers.Scope, template string, templateType parsers.VariableTemplateType, isSuperAdmin bool, maskUnknownVariable bool) (string, map[string]string, error) { - //Todo Subhashish manager layer - variableSnapshot := make(map[string]string) +func (impl ChartServiceImpl) ExtractVariablesAndResolveTemplate(scope resourceQualifiers.Scope, template string, templateType parsers.VariableTemplateType) (string, error) { + usedVariables, err := impl.variableTemplateParser.ExtractVariables(template, templateType) if err != nil { - return template, variableSnapshot, err + return "", err } if len(usedVariables) == 0 { - return template, variableSnapshot, err + return template, nil } - scopedVariables, err := impl.scopedVariableService.GetScopedVariables(scope, usedVariables, isSuperAdmin) + scopedVariables, err := impl.scopedVariableService.GetScopedVariables(scope, usedVariables, true) if err != nil { - return template, variableSnapshot, err - } - - for _, variable := range scopedVariables { - variableSnapshot[variable.VariableName] = variable.VariableValue.StringValue() - } - - if maskUnknownVariable { - for _, variable := range usedVariables { - if _, ok := variableSnapshot[variable]; !ok { - scopedVariables = append(scopedVariables, &models2.ScopedVariableData{ - VariableName: variable, - VariableValue: &models2.VariableValue{Value: models2.UndefinedValue}, - }) - } - } + return "", err } parserRequest := 
parsers.VariableParserRequest{Template: template, Variables: scopedVariables, TemplateType: templateType, IgnoreUnknownVariables: true} parserResponse := impl.variableTemplateParser.ParseTemplate(parserRequest) err = parserResponse.Error if err != nil { - return template, variableSnapshot, err + return "", err } resolvedTemplate := parserResponse.ResolvedTemplate - return resolvedTemplate, variableSnapshot, nil + return resolvedTemplate, nil } func (impl ChartServiceImpl) DeploymentTemplateValidate(ctx context.Context, template interface{}, chartRefId int, scope resourceQualifiers.Scope) (bool, error) { @@ -1392,7 +1375,7 @@ func (impl ChartServiceImpl) DeploymentTemplateValidate(ctx context.Context, tem //} templateBytes := template.(json.RawMessage) - templatejsonstring, _, err := impl.ExtractVariablesAndResolveTemplate(scope, string(templateBytes), parsers.JsonVariableTemplate, true, false) + templatejsonstring, err := impl.ExtractVariablesAndResolveTemplate(scope, string(templateBytes), parsers.JsonVariableTemplate) if err != nil { return false, err } diff --git a/pkg/cluster/ClusterCronService.go b/pkg/cluster/ClusterCronService.go index 5af2240960..d35d91191a 100644 --- a/pkg/cluster/ClusterCronService.go +++ b/pkg/cluster/ClusterCronService.go @@ -46,7 +46,7 @@ func (impl *ClusterCronServiceImpl) GetAndUpdateClusterConnectionStatus() { defer impl.logger.Debug("stopped cluster connection status fetch thread") //getting all clusters - clusters, err := impl.clusterService.FindAllExceptVirtual() + clusters, err := impl.clusterService.FindAll() if err != nil { impl.logger.Errorw("error in getting all clusters", "err", err) return diff --git a/pkg/cluster/ClusterService.go b/pkg/cluster/ClusterService.go index b7aab98214..990772950d 100644 --- a/pkg/cluster/ClusterService.go +++ b/pkg/cluster/ClusterService.go @@ -159,7 +159,6 @@ type ClusterService interface { FindOne(clusterName string) (*ClusterBean, error) FindOneActive(clusterName string) (*ClusterBean, 
error) FindAll() ([]*ClusterBean, error) - FindAllExceptVirtual() ([]*ClusterBean, error) FindAllWithoutConfig() ([]*ClusterBean, error) FindAllActive() ([]ClusterBean, error) DeleteFromDb(bean *ClusterBean, userId int32) error @@ -357,19 +356,6 @@ func (impl *ClusterServiceImpl) FindAll() ([]*ClusterBean, error) { return beans, nil } -func (impl *ClusterServiceImpl) FindAllExceptVirtual() ([]*ClusterBean, error) { - models, err := impl.clusterRepository.FindAllActiveExceptVirtual() - if err != nil { - return nil, err - } - var beans []*ClusterBean - for _, model := range models { - bean := GetClusterBean(model) - beans = append(beans, &bean) - } - return beans, nil -} - func (impl *ClusterServiceImpl) FindAllActive() ([]ClusterBean, error) { models, err := impl.clusterRepository.FindAllActive() if err != nil { diff --git a/pkg/cluster/ClusterServiceExtended.go b/pkg/cluster/ClusterServiceExtended.go index 1564867bce..8b62eb7681 100644 --- a/pkg/cluster/ClusterServiceExtended.go +++ b/pkg/cluster/ClusterServiceExtended.go @@ -71,7 +71,11 @@ func (impl *ClusterServiceImplExtended) FindAllWithoutConfig() ([]*ClusterBean, return beans, nil } -func (impl *ClusterServiceImplExtended) GetClusterFullModeDTO(beans []*ClusterBean) ([]*ClusterBean, error) { +func (impl *ClusterServiceImplExtended) FindAll() ([]*ClusterBean, error) { + beans, err := impl.ClusterServiceImpl.FindAll() + if err != nil { + return nil, err + } //devtron full mode logic var clusterIds []int for _, cluster := range beans { @@ -139,22 +143,6 @@ func (impl *ClusterServiceImplExtended) GetClusterFullModeDTO(beans []*ClusterBe return beans, nil } -func (impl *ClusterServiceImplExtended) FindAll() ([]*ClusterBean, error) { - beans, err := impl.ClusterServiceImpl.FindAll() - if err != nil { - return nil, err - } - return impl.GetClusterFullModeDTO(beans) -} - -func (impl *ClusterServiceImplExtended) FindAllExceptVirtual() ([]*ClusterBean, error) { - beans, err := impl.ClusterServiceImpl.FindAll() - if err 
!= nil { - return nil, err - } - return impl.GetClusterFullModeDTO(beans) -} - func (impl *ClusterServiceImplExtended) Update(ctx context.Context, bean *ClusterBean, userId int32) (*ClusterBean, error) { isGitOpsConfigured, err1 := impl.gitOpsRepository.IsGitOpsConfigured() if err1 != nil { diff --git a/pkg/cluster/repository/ClusterRepository.go b/pkg/cluster/repository/ClusterRepository.go index 5fe9941e93..fa57ff0ba8 100644 --- a/pkg/cluster/repository/ClusterRepository.go +++ b/pkg/cluster/repository/ClusterRepository.go @@ -50,7 +50,6 @@ type ClusterRepository interface { FindOneActive(clusterName string) (*Cluster, error) FindAll() ([]Cluster, error) FindAllActive() ([]Cluster, error) - FindAllActiveExceptVirtual() ([]Cluster, error) FindById(id int) (*Cluster, error) FindByIds(id []int) ([]Cluster, error) Update(model *Cluster) error @@ -128,16 +127,6 @@ func (impl ClusterRepositoryImpl) FindAllActive() ([]Cluster, error) { return clusters, err } -func (impl ClusterRepositoryImpl) FindAllActiveExceptVirtual() ([]Cluster, error) { - var clusters []Cluster - err := impl.dbConnection. - Model(&clusters). - Where("active=?", true). - Where("is_virtual_cluster=?", false). - Select() - return clusters, err -} - func (impl ClusterRepositoryImpl) FindById(id int) (*Cluster, error) { cluster := &Cluster{} err := impl.dbConnection. 
diff --git a/pkg/generateManifest/DeployementTemplateService.go b/pkg/generateManifest/DeployementTemplateService.go index 681bee30d4..df5f0d5f5c 100644 --- a/pkg/generateManifest/DeployementTemplateService.go +++ b/pkg/generateManifest/DeployementTemplateService.go @@ -17,7 +17,6 @@ import ( "github.com/devtron-labs/devtron/pkg/pipeline/history" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/variables/parsers" - util2 "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" "go.uber.org/zap" "os" @@ -57,9 +56,7 @@ var ReleaseIdentifier = &client.ReleaseIdentifier{ } type DeploymentTemplateResponse struct { - Data string `json:"data"` - ResolvedData string `json:"resolvedData"` - VariableSnapshot map[string]string `json:"variableSnapshot"` + Data string `json:"data"` } type DeploymentTemplateService interface { @@ -180,25 +177,25 @@ func (impl DeploymentTemplateServiceImpl) FetchDeploymentsWithChartRefs(appId in func (impl DeploymentTemplateServiceImpl) GetDeploymentTemplate(ctx context.Context, request DeploymentTemplateRequest) (DeploymentTemplateResponse, error) { var result DeploymentTemplateResponse - var values, resolvedValue string + var values string var err error - var variableSnapshot map[string]string if request.Values != "" { values = request.Values - resolvedValue, variableSnapshot, err = impl.resolveTemplateVariables(ctx, request.Values, request) - if err != nil { - return result, err + if request.RequestDataMode == Manifest { + values, err = impl.resolveTemplateVariables(request.Values, request) + if err != nil { + return result, err + } } } else { switch request.Type { case repository.DefaultVersions: _, values, err = impl.chartService.GetAppOverrideForDefaultTemplate(request.ChartRefId) - resolvedValue = values case repository.PublishedOnEnvironments: - values, resolvedValue, variableSnapshot, err = 
impl.fetchResolvedTemplateForPublishedEnvs(ctx, request) + values, err = impl.fetchResolvedTemplateForPublishedEnvs(request) case repository.DeployedOnSelfEnvironment, repository.DeployedOnOtherEnvironment: - values, resolvedValue, variableSnapshot, err = impl.fetchTemplateForDeployedEnv(ctx, request) + values, err = impl.fetchTemplateForDeployedEnv(request) } if err != nil { impl.Logger.Errorw("error in getting values", "err", err) @@ -208,12 +205,10 @@ func (impl DeploymentTemplateServiceImpl) GetDeploymentTemplate(ctx context.Cont if request.RequestDataMode == Values { result.Data = values - result.ResolvedData = resolvedValue - result.VariableSnapshot = variableSnapshot return result, nil } - manifest, err := impl.GenerateManifest(ctx, request.ChartRefId, resolvedValue) + manifest, err := impl.GenerateManifest(ctx, request.ChartRefId, values) if err != nil { return result, err } @@ -221,7 +216,7 @@ func (impl DeploymentTemplateServiceImpl) GetDeploymentTemplate(ctx context.Cont return result, nil } -func (impl DeploymentTemplateServiceImpl) fetchResolvedTemplateForPublishedEnvs(ctx context.Context, request DeploymentTemplateRequest) (string, string, map[string]string, error) { +func (impl DeploymentTemplateServiceImpl) fetchResolvedTemplateForPublishedEnvs(request DeploymentTemplateRequest) (string, error) { var values string override, err := impl.propertiesConfigService.GetEnvironmentProperties(request.AppId, request.EnvId, request.ChartRefId) if err == nil && override.GlobalConfig != nil { @@ -232,42 +227,42 @@ func (impl DeploymentTemplateServiceImpl) fetchResolvedTemplateForPublishedEnvs( } } else { impl.Logger.Errorw("error in getting overridden values", "err", err) - return "", "", nil, err + return "", err } - resolvedTemplate, variableSnapshot, err := impl.resolveTemplateVariables(ctx, values, request) - if err != nil { - return values, values, variableSnapshot, err + if request.RequestDataMode == Manifest { + resolvedTemplate, err := 
impl.resolveTemplateVariables(values, request) + if err != nil { + return values, err + } + values = resolvedTemplate } - return values, resolvedTemplate, variableSnapshot, nil + return values, nil } -func (impl DeploymentTemplateServiceImpl) fetchTemplateForDeployedEnv(ctx context.Context, request DeploymentTemplateRequest) (string, string, map[string]string, error) { - historyObject, err := impl.deploymentTemplateHistoryService.GetHistoryForDeployedTemplateById(ctx, request.DeploymentTemplateHistoryId, request.PipelineId) +func (impl DeploymentTemplateServiceImpl) fetchTemplateForDeployedEnv(request DeploymentTemplateRequest) (string, error) { + history, err := impl.deploymentTemplateHistoryService.GetHistoryForDeployedTemplateById(request.DeploymentTemplateHistoryId, request.PipelineId) if err != nil { impl.Logger.Errorw("error in getting deployment template history", "err", err, "id", request.DeploymentTemplateHistoryId, "pipelineId", request.PipelineId) - return "", "", nil, err + return "", err } - //todo Subhashish solve variable leak - return historyObject.CodeEditorValue.Value, historyObject.ResolvedTemplateData, historyObject.VariableSnapshot, nil + if request.RequestDataMode == Values { + return history.CodeEditorValue.Value, nil + } + return history.ResolvedTemplate, nil } -func (impl DeploymentTemplateServiceImpl) resolveTemplateVariables(ctx context.Context, values string, request DeploymentTemplateRequest) (string, map[string]string, error) { +func (impl DeploymentTemplateServiceImpl) resolveTemplateVariables(values string, request DeploymentTemplateRequest) (string, error) { - isSuperAdmin, err := util2.GetIsSuperAdminFromContext(ctx) - if err != nil { - return values, nil, err - } scope, err := impl.extractScopeData(request) if err != nil { - return values, nil, err + return "", err } - maskUnknownVariableForHelmGenerate := request.RequestDataMode == Manifest - resolvedTemplate, variableSnapshot, err := 
impl.chartService.ExtractVariablesAndResolveTemplate(scope, values, parsers.StringVariableTemplate, isSuperAdmin, maskUnknownVariableForHelmGenerate) + resolvedTemplate, err := impl.chartService.ExtractVariablesAndResolveTemplate(scope, values, parsers.StringVariableTemplate) if err != nil { - return values, variableSnapshot, err + return "", err } - return resolvedTemplate, variableSnapshot, nil + return resolvedTemplate, nil } func (impl DeploymentTemplateServiceImpl) extractScopeData(request DeploymentTemplateRequest) (resourceQualifiers.Scope, error) { diff --git a/pkg/k8s/K8sCommonService.go b/pkg/k8s/K8sCommonService.go index f8c7e3f372..c38519b952 100644 --- a/pkg/k8s/K8sCommonService.go +++ b/pkg/k8s/K8sCommonService.go @@ -345,10 +345,9 @@ func (impl *K8sCommonServiceImpl) GetCoreClientByClusterId(clusterId int) (*kube } func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceResponse, resourceTree map[string]interface{}) map[string]interface{} { - servicePortMapping := make(map[string]interface{}) - endpointPortMapping := make(map[string]interface{}) - endpointSlicePortMapping := make(map[string]interface{}) - + portsService := make([]int64, 0) + portsEndpoint := make([]int64, 0) + portEndpointSlice := make([]int64, 0) for _, portHolder := range resp { if portHolder.ManifestResponse == nil { continue @@ -358,26 +357,6 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("kind not found in resource tree, unable to extract port no") continue } - metadataResp, ok := portHolder.ManifestResponse.Manifest.Object[k8sCommonBean.K8sClusterResourceMetadataKey] - if !ok { - impl.logger.Warnw("metadata not found in resource tree, unable to extract port no") - continue - } - metadata, ok := metadataResp.(map[string]interface{}) - if !ok { - impl.logger.Warnw("metadata not found in resource tree, unable to extract port no") - continue - } - serviceNameResp, ok := 
metadata[k8sCommonBean.K8sClusterResourceMetadataNameKey] - if !ok { - impl.logger.Warnw("service name not found in resource tree, unable to extract port no") - continue - } - serviceName, ok := serviceNameResp.(string) - if !ok { - impl.logger.Warnw("service name not found in resource tree, unable to extract port no") - continue - } if kind == k8sCommonBean.ServiceKind { specField, ok := portHolder.ManifestResponse.Manifest.Object[k8sCommonBean.Spec] if !ok { @@ -389,7 +368,6 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("spec not found in resource tree, unable to extract port no") continue } - if spec != nil { ports, ok := spec[k8sCommonBean.Ports] if !ok { @@ -401,7 +379,6 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("portList not found in resource tree, unable to extract port no") continue } - servicePorts := make([]int64, 0) for _, portItem := range portList { portItems, ok := portItem.(map[string]interface{}) if !ok { @@ -420,11 +397,10 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon continue } if portNumber != 0 { - servicePorts = append(servicePorts, portNumber) + portsService = append(portsService, portNumber) } } } - servicePortMapping[serviceName] = servicePorts } else { impl.logger.Warnw("spec doest not contain data", "spec", spec) continue @@ -459,7 +435,6 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("portsIfs not found in resource tree, unable to extract port no") continue } - endpointPorts := make([]int64, 0) for _, portsIf := range portsIfs { portsIfObj, ok := portsIf.(map[string]interface{}) if !ok { @@ -472,10 +447,9 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("port not found in resource tree, unable to extract port no") continue } - endpointPorts = append(endpointPorts, port) + 
portsEndpoint = append(portsEndpoint, port) } } - endpointPortMapping[serviceName] = endpointPorts } } } @@ -492,7 +466,6 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("endPointsSlicePorts not found in resource tree endpoint, unable to extract port no") continue } - endpointSlicePorts := make([]int64, 0) for _, val := range endPointsSlicePorts { portNumbers, ok := val.(map[string]interface{})[k8sCommonBean.Port] if !ok { @@ -505,10 +478,9 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon continue } if portNumber != 0 { - endpointSlicePorts = append(endpointSlicePorts, portNumber) + portEndpointSlice = append(portEndpointSlice, portNumber) } } - endpointSlicePortMapping[serviceName] = endpointSlicePorts } } } @@ -524,25 +496,15 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("value not found in resourceTreeVal, unable to extract port no") continue } - serviceNameRes, ok := value[k8sCommonBean.K8sClusterResourceMetadataNameKey] - if !ok { - impl.logger.Warnw("service name not found in resourceTreeVal, unable to extract port no") - continue - } - serviceName, ok := serviceNameRes.(string) - if !ok { - impl.logger.Warnw("service name not found in resourceTreeVal, unable to extract port no") - continue - } for key, _type := range value { if key == k8sCommonBean.Kind && _type == k8sCommonBean.EndpointsKind { - value[k8sCommonBean.Port] = endpointPortMapping[serviceName] + value[k8sCommonBean.Port] = portsEndpoint } if key == k8sCommonBean.Kind && _type == k8sCommonBean.ServiceKind { - value[k8sCommonBean.Port] = servicePortMapping[serviceName] + value[k8sCommonBean.Port] = portsService } if key == k8sCommonBean.Kind && _type == k8sCommonBean.EndPointsSlice { - value[k8sCommonBean.Port] = endpointSlicePortMapping[serviceName] + value[k8sCommonBean.Port] = portEndpointSlice } } } diff --git a/pkg/k8s/capacity/bean/bean.go 
b/pkg/k8s/capacity/bean/bean.go index 95e35c8644..138aa69953 100644 --- a/pkg/k8s/capacity/bean/bean.go +++ b/pkg/k8s/capacity/bean/bean.go @@ -66,6 +66,7 @@ type ClusterCapacityDetail struct { ServerVersion string `json:"serverVersion,omitempty"` Cpu *ResourceDetailObject `json:"cpu"` Memory *ResourceDetailObject `json:"memory"` + IsVirtualCluster bool `json:"isVirtualCluster"` } type NodeCapacityDetail struct { diff --git a/pkg/k8s/capacity/k8sCapacityService.go b/pkg/k8s/capacity/k8sCapacityService.go index 97da7376b0..1bef44b9dc 100644 --- a/pkg/k8s/capacity/k8sCapacityService.go +++ b/pkg/k8s/capacity/k8sCapacityService.go @@ -67,7 +67,9 @@ func (impl *K8sCapacityServiceImpl) GetClusterCapacityDetailList(ctx context.Con for _, cluster := range clusters { clusterCapacityDetail := &bean.ClusterCapacityDetail{} var err error - if len(cluster.ErrorInConnecting) > 0 { + if cluster.IsVirtualCluster { + clusterCapacityDetail.IsVirtualCluster = cluster.IsVirtualCluster + } else if len(cluster.ErrorInConnecting) > 0 { clusterCapacityDetail.ErrorInConnection = cluster.ErrorInConnecting } else { clusterCapacityDetail, err = impl.GetClusterCapacityDetail(ctx, cluster, true) diff --git a/pkg/pipeline/BuildPipelineConfigService.go b/pkg/pipeline/BuildPipelineConfigService.go index 4219f677ef..b2c968a4d6 100644 --- a/pkg/pipeline/BuildPipelineConfigService.go +++ b/pkg/pipeline/BuildPipelineConfigService.go @@ -26,6 +26,7 @@ import ( dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/attributes" "github.com/devtron-labs/devtron/pkg/bean" bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" @@ -125,7 +126,7 @@ 
type CiPipelineConfigServiceImpl struct { ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository resourceGroupService resourceGroup2.ResourceGroupService enforcerUtil rbac.EnforcerUtil - customTagService CustomTagService + customTagService pkg.CustomTagService } func NewCiPipelineConfigServiceImpl(logger *zap.SugaredLogger, @@ -147,7 +148,7 @@ func NewCiPipelineConfigServiceImpl(logger *zap.SugaredLogger, enforcerUtil rbac.EnforcerUtil, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, resourceGroupService resourceGroup2.ResourceGroupService, - customTagService CustomTagService) *CiPipelineConfigServiceImpl { + customTagService pkg.CustomTagService) *CiPipelineConfigServiceImpl { securityConfig := &SecurityConfig{} err := env.Parse(securityConfig) @@ -620,7 +621,7 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipeline(appId int) (ciConfig *bea impl.logger.Errorw("error in fetching ciEnvMapping", "ciPipelineId ", pipeline.Id, "err", err) return nil, err } - customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) if err != nil && err != pg.ErrNoRows { return nil, err } @@ -757,16 +758,6 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipelineById(pipelineId int) (ciPi IsDockerConfigOverridden: pipeline.IsDockerConfigOverridden, PipelineType: bean.PipelineType(pipeline.PipelineType), } - customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) - if err != nil && err != pg.ErrNoRows { - return nil, err - } - if customTag.Id != 0 { - ciPipeline.CustomTagObject = &bean.CustomTagData{ - TagPattern: customTag.TagPattern, - CounterX: customTag.AutoIncreasingNumber, - } - } ciEnvMapping, err := 
impl.ciPipelineRepository.FindCiEnvMappingByCiPipelineId(pipelineId) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching ci env mapping", "pipelineId", pipelineId, "err", err) diff --git a/pkg/pipeline/CdHandler.go b/pkg/pipeline/CdHandler.go index ad22659614..b2dbb3913b 100644 --- a/pkg/pipeline/CdHandler.go +++ b/pkg/pipeline/CdHandler.go @@ -52,7 +52,6 @@ import ( "github.com/go-pg/pg" "go.opentelemetry.io/otel" "go.uber.org/zap" - "k8s.io/client-go/rest" "os" "path/filepath" "strconv" @@ -601,13 +600,10 @@ func (impl *CdHandlerImpl) CancelStage(workflowRunnerId int, userId int32) (int, } else if workflowRunner.WorkflowType == POST { isExtCluster = pipeline.RunPostStageInEnv } - var restConfig *rest.Config - if isExtCluster { - restConfig, err = impl.k8sUtil.GetRestConfigByCluster(clusterConfig) - if err != nil { - impl.Logger.Errorw("error in getting rest config by cluster id", "err", err) - return 0, err - } + restConfig, err := impl.k8sUtil.GetRestConfigByCluster(clusterConfig) + if err != nil { + impl.Logger.Errorw("error in getting rest config by cluster id", "err", err) + return 0, err } // Terminate workflow err = impl.workflowService.TerminateWorkflow(workflowRunner.ExecutorType, workflowRunner.Name, workflowRunner.Namespace, restConfig, isExtCluster, nil) diff --git a/pkg/pipeline/CiCdConfig.go b/pkg/pipeline/CiCdConfig.go index 6cf984bef9..af9d54d35c 100644 --- a/pkg/pipeline/CiCdConfig.go +++ b/pkg/pipeline/CiCdConfig.go @@ -1,14 +1,12 @@ package pipeline import ( - "encoding/json" "flag" "fmt" "github.com/caarlos0/env" blob_storage "github.com/devtron-labs/common-lib/blob-storage" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/pkg/pipeline/bean" - v12 "k8s.io/api/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "os/user" @@ -422,45 +420,3 @@ func (impl *CiCdConfig) 
WorkflowRetriesEnabled() bool { return false } } - -func (impl *CiCdConfig) GetWorkflowVolumeAndVolumeMounts() ([]v12.Volume, []v12.VolumeMount, error) { - var volumes []v12.Volume - var volumeMounts []v12.VolumeMount - volumeMountsForCiJson := impl.VolumeMountsForCiJson - if len(volumeMountsForCiJson) > 0 { - var volumeMountsForCi []CiVolumeMount - // Unmarshal or Decode the JSON to the interface. - err := json.Unmarshal([]byte(volumeMountsForCiJson), &volumeMountsForCi) - if err != nil { - return nil, nil, err - } - - for _, volumeMountForCi := range volumeMountsForCi { - volumes = append(volumes, getWorkflowVolume(volumeMountForCi)) - volumeMounts = append(volumeMounts, getWorkflowVolumeMounts(volumeMountForCi)) - } - } - return volumes, volumeMounts, nil -} - -func getWorkflowVolume(volumeMountForCi CiVolumeMount) v12.Volume { - hostPathDirectoryOrCreate := v12.HostPathDirectoryOrCreate - - return v12.Volume{ - Name: volumeMountForCi.Name, - VolumeSource: v12.VolumeSource{ - HostPath: &v12.HostPathVolumeSource{ - Path: volumeMountForCi.HostMountPath, - Type: &hostPathDirectoryOrCreate, - }, - }, - } - -} - -func getWorkflowVolumeMounts(volumeMountForCi CiVolumeMount) v12.VolumeMount { - return v12.VolumeMount{ - Name: volumeMountForCi.Name, - MountPath: volumeMountForCi.ContainerMountPath, - } -} diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index f7500a83d3..e0936e50c0 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -32,6 +32,7 @@ import ( app2 "github.com/devtron-labs/devtron/internal/sql/repository/app" dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/helper" + "github.com/devtron-labs/devtron/pkg" repository2 "github.com/devtron-labs/devtron/pkg/cluster/repository" 
"github.com/devtron-labs/devtron/pkg/genericNotes" repository3 "github.com/devtron-labs/devtron/pkg/genericNotes/repository" @@ -114,7 +115,7 @@ type CiCdPipelineOrchestratorImpl struct { dockerArtifactStoreRepository dockerRegistryRepository.DockerArtifactStoreRepository configMapService ConfigMapService genericNoteService genericNotes.GenericNoteService - customTagService CustomTagService + customTagService pkg.CustomTagService } func NewCiCdPipelineOrchestrator( @@ -140,7 +141,7 @@ func NewCiCdPipelineOrchestrator( ciTemplateService CiTemplateService, dockerArtifactStoreRepository dockerRegistryRepository.DockerArtifactStoreRepository, configMapService ConfigMapService, - customTagService CustomTagService, + customTagService pkg.CustomTagService, genericNoteService genericNotes.GenericNoteService) *CiCdPipelineOrchestratorImpl { return &CiCdPipelineOrchestratorImpl{ appRepository: pipelineGroupRepository, @@ -334,7 +335,7 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. //Otherwise deleteIfExists if createRequest.CustomTagObject != nil { customTag := bean4.CustomTag{ - EntityKey: bean2.EntityTypeCiPipelineId, + EntityKey: pkg.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipelineObject.Id), TagPattern: createRequest.CustomTagObject.TagPattern, AutoIncreasingNumber: createRequest.CustomTagObject.CounterX, @@ -345,7 +346,7 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. 
} } else { customTag := bean4.CustomTag{ - EntityKey: bean2.EntityTypeCiPipelineId, + EntityKey: pkg.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipelineObject.Id), } err := impl.customTagService.DeleteCustomTagIfExists(customTag) @@ -770,7 +771,7 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCiConf(createRequest *bean.CiConf //If customTagObejct has been passed, save it if ciPipeline.CustomTagObject != nil { customTag := &bean4.CustomTag{ - EntityKey: bean2.EntityTypeCiPipelineId, + EntityKey: pkg.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipeline.Id), TagPattern: ciPipeline.CustomTagObject.TagPattern, AutoIncreasingNumber: ciPipeline.CustomTagObject.CounterX, @@ -1064,23 +1065,18 @@ func (impl CiCdPipelineOrchestratorImpl) CreateApp(createRequest *bean.CreateApp } // create labels and tags with app if app.Active && len(createRequest.AppLabels) > 0 { - appLabelMap := make(map[string]bool) for _, label := range createRequest.AppLabels { - uniqueLabelExists := fmt.Sprintf("%s:%s:%t", label.Key, label.Value, label.Propagate) - if _, ok := appLabelMap[uniqueLabelExists]; !ok { - appLabelMap[uniqueLabelExists] = true - request := &bean.AppLabelDto{ - AppId: app.Id, - Key: label.Key, - Value: label.Value, - Propagate: label.Propagate, - UserId: createRequest.UserId, - } - _, err := impl.appLabelsService.Create(request, tx) - if err != nil { - impl.logger.Errorw("error on creating labels for app id ", "err", err, "appId", app.Id) - return nil, err - } + request := &bean.AppLabelDto{ + AppId: app.Id, + Key: label.Key, + Value: label.Value, + Propagate: label.Propagate, + UserId: createRequest.UserId, + } + _, err := impl.appLabelsService.Create(request, tx) + if err != nil { + impl.logger.Errorw("error on creating labels for app id ", "err", err, "appId", app.Id) + return nil, err } } } diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index 385036df79..e6a697f34c 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go 
@@ -30,6 +30,7 @@ import ( "github.com/devtron-labs/devtron/client/gitSensor" "github.com/devtron-labs/devtron/internal/sql/repository/appWorkflow" repository2 "github.com/devtron-labs/devtron/internal/sql/repository/imageTagging" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/cluster" repository3 "github.com/devtron-labs/devtron/pkg/cluster/repository" k8s2 "github.com/devtron-labs/devtron/pkg/k8s" @@ -106,13 +107,13 @@ type CiHandlerImpl struct { resourceGroupService resourceGroup.ResourceGroupService envRepository repository3.EnvironmentRepository imageTaggingService ImageTaggingService - customTagService CustomTagService + customTagService pkg.CustomTagService appWorkflowRepository appWorkflow.AppWorkflowRepository config *CiConfig k8sCommonService k8s2.K8sCommonService } -func NewCiHandlerImpl(Logger *zap.SugaredLogger, ciService CiService, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, gitSensorClient gitSensor.Client, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, workflowService WorkflowService, ciLogService CiLogService, ciArtifactRepository repository.CiArtifactRepository, userService user.UserService, eventClient client.EventClient, eventFactory client.EventFactory, ciPipelineRepository pipelineConfig.CiPipelineRepository, appListingRepository repository.AppListingRepository, K8sUtil *k8s.K8sUtil, cdPipelineRepository pipelineConfig.PipelineRepository, enforcerUtil rbac.EnforcerUtil, resourceGroupService resourceGroup.ResourceGroupService, envRepository repository3.EnvironmentRepository, imageTaggingService ImageTaggingService, appWorkflowRepository appWorkflow.AppWorkflowRepository, customTagService CustomTagService, k8sCommonService k8s2.K8sCommonService) *CiHandlerImpl { +func NewCiHandlerImpl(Logger *zap.SugaredLogger, ciService CiService, 
ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, gitSensorClient gitSensor.Client, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, workflowService WorkflowService, ciLogService CiLogService, ciArtifactRepository repository.CiArtifactRepository, userService user.UserService, eventClient client.EventClient, eventFactory client.EventFactory, ciPipelineRepository pipelineConfig.CiPipelineRepository, appListingRepository repository.AppListingRepository, K8sUtil *k8s.K8sUtil, cdPipelineRepository pipelineConfig.PipelineRepository, enforcerUtil rbac.EnforcerUtil, resourceGroupService resourceGroup.ResourceGroupService, envRepository repository3.EnvironmentRepository, imageTaggingService ImageTaggingService, appWorkflowRepository appWorkflow.AppWorkflowRepository, customTagService pkg.CustomTagService, k8sCommonService k8s2.K8sCommonService) *CiHandlerImpl { cih := &CiHandlerImpl{ Logger: Logger, ciService: ciService, @@ -620,8 +621,8 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int EnvironmentName: w.EnvironmentName, ReferenceWorkflowId: w.RefCiWorkflowId, } - if w.Message == bean3.ImageTagUnavailableMessage { - customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(bean3.EntityTypeCiPipelineId, strconv.Itoa(w.CiPipelineId)) + if w.Message == pkg.ImageTagUnavailableMessage { + customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(w.CiPipelineId)) if err != nil && err != pg.ErrNoRows { //err == pg.ErrNoRows should never happen return nil, err @@ -634,7 +635,7 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int wfResponse.CustomTag = &bean2.CustomTagErrorResponse{ TagPattern: customTag.TagPattern, AutoIncreasingNumber: customTag.AutoIncreasingNumber, - Message: bean3.ImageTagUnavailableMessage, + Message: pkg.ImageTagUnavailableMessage, } } if imageTagsDataMap[w.CiArtifactId] != nil { diff --git 
a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index a8e58881ca..15b39010a0 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -24,6 +24,7 @@ import ( appRepository "github.com/devtron-labs/devtron/internal/sql/repository/app" repository3 "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/helper" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" repository1 "github.com/devtron-labs/devtron/pkg/cluster/repository" bean2 "github.com/devtron-labs/devtron/pkg/pipeline/bean" @@ -72,7 +73,7 @@ type CiServiceImpl struct { appCrudOperationService app.AppCrudOperationService envRepository repository1.EnvironmentRepository appRepository appRepository.AppRepository - customTagService CustomTagService + customTagService pkg.CustomTagService variableSnapshotHistoryService variables.VariableSnapshotHistoryService config *CiConfig } @@ -86,7 +87,7 @@ func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService userService user.UserService, ciTemplateService CiTemplateService, appCrudOperationService app.AppCrudOperationService, envRepository repository1.EnvironmentRepository, appRepository appRepository.AppRepository, variableSnapshotHistoryService variables.VariableSnapshotHistoryService, - customTagService CustomTagService, + customTagService pkg.CustomTagService, ) *CiServiceImpl { cis := &CiServiceImpl{ Logger: Logger, @@ -455,16 +456,16 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
} var dockerImageTag string - customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean2.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) if err != nil && err != pg.ErrNoRows { return nil, err } if customTag.Id != 0 { - imagePathReservation, err := impl.customTagService.GenerateImagePath(bean2.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id), pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository) + imagePathReservation, err := impl.customTagService.GenerateImagePath(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id), pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository) if err != nil { - if errors.Is(err, bean2.ErrImagePathInUse) { + if errors.Is(err, pkg.ErrImagePathInUse) { savedWf.Status = pipelineConfig.WorkflowFailed - savedWf.Message = bean2.ImageTagUnavailableMessage + savedWf.Message = pkg.ImageTagUnavailableMessage err1 := impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) if err1 != nil { impl.Logger.Errorw("could not save workflow, after failing due to conflicting image tag") diff --git a/pkg/pipeline/DeploymentConfigService.go b/pkg/pipeline/DeploymentConfigService.go index 91c9e1d7e2..7f5ec89832 100644 --- a/pkg/pipeline/DeploymentConfigService.go +++ b/pkg/pipeline/DeploymentConfigService.go @@ -1,7 +1,6 @@ package pipeline import ( - "context" "encoding/json" "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" @@ -12,16 +11,14 @@ import ( "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/variables" "github.com/devtron-labs/devtron/pkg/variables/models" - "github.com/devtron-labs/devtron/pkg/variables/parsers" 
repository6 "github.com/devtron-labs/devtron/pkg/variables/repository" - "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" errors2 "github.com/juju/errors" "go.uber.org/zap" ) type DeploymentConfigService interface { - GetLatestDeploymentConfigurationByPipelineId(ctx context.Context, pipelineId int, userHasAdminAccess bool) (*history.AllDeploymentConfigurationDetail, error) + GetLatestDeploymentConfigurationByPipelineId(pipelineId int, userHasAdminAccess bool) (*history.AllDeploymentConfigurationDetail, error) } type DeploymentConfigServiceImpl struct { @@ -37,7 +34,6 @@ type DeploymentConfigServiceImpl struct { chartRefRepository chartRepoRepository.ChartRefRepository variableEntityMappingService variables.VariableEntityMappingService scopedVariableService variables.ScopedVariableService - variableTemplateParser parsers.VariableTemplateParser } func NewDeploymentConfigServiceImpl(logger *zap.SugaredLogger, @@ -51,9 +47,7 @@ func NewDeploymentConfigServiceImpl(logger *zap.SugaredLogger, configMapHistoryService history.ConfigMapHistoryService, chartRefRepository chartRepoRepository.ChartRefRepository, variableEntityMappingService variables.VariableEntityMappingService, - scopedVariableService variables.ScopedVariableService, - variableTemplateParser parsers.VariableTemplateParser, -) *DeploymentConfigServiceImpl { + scopedVariableService variables.ScopedVariableService) *DeploymentConfigServiceImpl { return &DeploymentConfigServiceImpl{ logger: logger, envConfigOverrideRepository: envConfigOverrideRepository, @@ -67,11 +61,10 @@ func NewDeploymentConfigServiceImpl(logger *zap.SugaredLogger, chartRefRepository: chartRefRepository, variableEntityMappingService: variableEntityMappingService, scopedVariableService: scopedVariableService, - variableTemplateParser: variableTemplateParser, } } -func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentConfigurationByPipelineId(ctx 
context.Context, pipelineId int, userHasAdminAccess bool) (*history.AllDeploymentConfigurationDetail, error) { +func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentConfigurationByPipelineId(pipelineId int, userHasAdminAccess bool) (*history.AllDeploymentConfigurationDetail, error) { configResp := &history.AllDeploymentConfigurationDetail{} pipeline, err := impl.pipelineRepository.FindById(pipelineId) if err != nil { @@ -79,7 +72,7 @@ func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentConfigurationByPipel return nil, err } - deploymentTemplateConfig, err := impl.GetLatestDeploymentTemplateConfig(ctx, pipeline) + deploymentTemplateConfig, err := impl.GetLatestDeploymentTemplateConfig(pipeline) if err != nil { impl.logger.Errorw("error in getting latest deploymentTemplate", "err", err) return nil, err @@ -103,41 +96,28 @@ func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentConfigurationByPipel return configResp, nil } -func (impl *DeploymentConfigServiceImpl) extractVariablesAndGetScopedVariables(template string, scope resourceQualifiers.Scope, entity repository6.Entity, isSuperAdmin bool) (string, map[string]string, error) { +func (impl *DeploymentConfigServiceImpl) extractVariablesAndGetScopedVariables(scope resourceQualifiers.Scope, entity repository6.Entity) (map[string]string, error) { variableMap := make(map[string]string) entityToVariables, err := impl.variableEntityMappingService.GetAllMappingsForEntities([]repository6.Entity{entity}) if err != nil { - return template, variableMap, err + return variableMap, err } scopedVariables := make([]*models.ScopedVariableData, 0) if _, ok := entityToVariables[entity]; ok && len(entityToVariables[entity]) > 0 { - scopedVariables, err = impl.scopedVariableService.GetScopedVariables(scope, entityToVariables[entity], isSuperAdmin) + scopedVariables, err = impl.scopedVariableService.GetScopedVariables(scope, entityToVariables[entity], true) if err != nil { - return template, variableMap, err + return 
variableMap, err } } for _, variable := range scopedVariables { variableMap[variable.VariableName] = variable.VariableValue.StringValue() } - - if len(variableMap) == 0 { - return template, variableMap, nil - } - - parserRequest := parsers.VariableParserRequest{Template: template, Variables: scopedVariables, TemplateType: parsers.JsonVariableTemplate} - parserResponse := impl.variableTemplateParser.ParseTemplate(parserRequest) - err = parserResponse.Error - if err != nil { - return template, variableMap, err - } - resolvedTemplate := parserResponse.ResolvedTemplate - - return resolvedTemplate, variableMap, nil + return variableMap, nil } -func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentTemplateConfig(ctx context.Context, pipeline *pipelineConfig.Pipeline) (*history.HistoryDetailDto, error) { +func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentTemplateConfig(pipeline *pipelineConfig.Pipeline) (*history.HistoryDetailDto, error) { isAppMetricsEnabled := false envLevelAppMetrics, err := impl.envLevelAppMetricsRepository.FindByAppIdAndEnvId(pipeline.AppId, pipeline.EnvironmentId) if err != nil && err != pg.ErrNoRows { @@ -175,14 +155,10 @@ func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentTemplateConfig(ctx c EntityType: repository6.EntityTypeDeploymentTemplateEnvLevel, EntityId: envOverride.Id, } - isSuperAdmin, err := util.GetIsSuperAdminFromContext(ctx) + scopedVariablesMap, err := impl.extractVariablesAndGetScopedVariables(scope, entity) if err != nil { return nil, err } - resolvedTemplate, scopedVariablesMap, err := impl.extractVariablesAndGetScopedVariables(envOverride.EnvOverrideValues, scope, entity, isSuperAdmin) - if err != nil { - impl.logger.Errorw("could not resolve template", "err", err, "envOverrideId", envOverride.Id, "scope", scope, "pipelineId", pipeline.Id) - } deploymentTemplateConfig = &history.HistoryDetailDto{ TemplateName: envOverride.Chart.ChartName, @@ -192,8 +168,7 @@ func (impl *DeploymentConfigServiceImpl) 
GetLatestDeploymentTemplateConfig(ctx c DisplayName: "values.yaml", Value: envOverride.EnvOverrideValues, }, - VariableSnapshot: scopedVariablesMap, - ResolvedTemplateData: resolvedTemplate, + VariableSnapshot: scopedVariablesMap, } } } else { @@ -217,14 +192,10 @@ func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentTemplateConfig(ctx c EntityType: repository6.EntityTypeDeploymentTemplateAppLevel, EntityId: chart.Id, } - isSuperAdmin, err := util.GetIsSuperAdminFromContext(ctx) + scopedVariablesMap, err := impl.extractVariablesAndGetScopedVariables(scope, entity) if err != nil { return nil, err } - resolvedTemplate, scopedVariablesMap, err := impl.extractVariablesAndGetScopedVariables(chart.GlobalOverride, scope, entity, isSuperAdmin) - if err != nil { - impl.logger.Errorw("could not resolve template", "err", err, "chartId", chart.Id, "scope", scope, "pipelineId", pipeline.Id) - } deploymentTemplateConfig = &history.HistoryDetailDto{ TemplateName: chart.ChartName, TemplateVersion: chartRef.Version, @@ -233,8 +204,7 @@ func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentTemplateConfig(ctx c DisplayName: "values.yaml", Value: chart.GlobalOverride, }, - VariableSnapshot: scopedVariablesMap, - ResolvedTemplateData: resolvedTemplate, + VariableSnapshot: scopedVariablesMap, } } return deploymentTemplateConfig, nil diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index d8953ff3c9..80e37041fe 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -109,7 +109,6 @@ type CdPipelineConfigService interface { GetEnvironmentListForAutocompleteFilter(envName string, clusterIds []int, offset int, size int, emailId string, checkAuthBatch func(emailId string, appObject []string, envObject []string) (map[string]bool, map[string]bool), ctx context.Context) (*cluster.ResourceGroupingResponse, error) IsGitopsConfigured() (bool, error) 
RegisterInACD(gitOpsRepoName string, chartGitAttr *util.ChartGitAttribute, userId int32, ctx context.Context) error - CreateExternalCiAndAppWorkflowMapping(appId, appWorkflowId int, userId int32, tx *pg.Tx) (int, error) } type CdPipelineConfigServiceImpl struct { @@ -1542,7 +1541,15 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a } // Rollback tx on error. defer tx.Rollback() + if pipeline.AppWorkflowId == 0 && pipeline.ParentPipelineType == "WEBHOOK" { + externalCiPipeline := &pipelineConfig.ExternalCiPipeline{ + AppId: app.Id, + AccessToken: "", + Active: true, + AuditLog: sql.AuditLog{CreatedBy: userId, CreatedOn: time.Now(), UpdatedOn: time.Now(), UpdatedBy: userId}, + } + externalCiPipeline, err = impl.ciPipelineRepository.SaveExternalCi(externalCiPipeline, tx) wf := &appWorkflow.AppWorkflow{ Name: fmt.Sprintf("wf-%d-%s", app.Id, util2.Generate(4)), AppId: app.Id, @@ -1551,15 +1558,21 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a } savedAppWf, err := impl.appWorkflowRepository.SaveAppWorkflowWithTx(wf, tx) if err != nil { - impl.logger.Errorw("error in saving app workflow", "appId", app.Id, "err", err) + impl.logger.Errorw("err", err) return 0, err } - externalCiPipelineId, err := impl.CreateExternalCiAndAppWorkflowMapping(app.Id, savedAppWf.Id, userId, tx) + appWorkflowMap := &appWorkflow.AppWorkflowMapping{ + AppWorkflowId: savedAppWf.Id, + ComponentId: externalCiPipeline.Id, + Type: "WEBHOOK", + Active: true, + AuditLog: sql.AuditLog{CreatedBy: userId, CreatedOn: time.Now(), UpdatedOn: time.Now(), UpdatedBy: userId}, + } + appWorkflowMap, err = impl.appWorkflowRepository.SaveAppWorkflowMapping(appWorkflowMap, tx) if err != nil { - impl.logger.Errorw("error in creating new external ci pipeline and new app workflow mapping", "appId", app.Id, "err", err) return 0, err } - pipeline.ParentPipelineId = externalCiPipelineId + pipeline.ParentPipelineId = externalCiPipeline.Id 
pipeline.AppWorkflowId = savedAppWf.Id } @@ -1576,12 +1589,9 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a //TODO: mark as created in our db pipelineId, err := impl.ciCdPipelineOrchestrator.CreateCDPipelines(pipeline, app.Id, userId, tx, app.AppName) if err != nil { - impl.logger.Errorw("error in creating cd pipeline", "appId", app.Id, "pipeline", pipeline) + impl.logger.Errorw("error in ") return 0, err } - if pipeline.RefPipelineId > 0 { - pipeline.SourceToNewPipelineId[pipeline.RefPipelineId] = pipelineId - } //adding pipeline to workflow _, err = impl.appWorkflowRepository.FindByIdAndAppId(pipeline.AppWorkflowId, app.Id) @@ -1591,16 +1601,12 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a if pipeline.AppWorkflowId > 0 { var parentPipelineId int var parentPipelineType string - if pipeline.ParentPipelineId == 0 { parentPipelineId = pipeline.CiPipelineId parentPipelineType = "CI_PIPELINE" } else { parentPipelineId = pipeline.ParentPipelineId parentPipelineType = pipeline.ParentPipelineType - if pipeline.ParentPipelineType != appWorkflow.WEBHOOK && pipeline.RefPipelineId > 0 && len(pipeline.SourceToNewPipelineId) > 0 { - parentPipelineId = pipeline.SourceToNewPipelineId[pipeline.ParentPipelineId] - } } appWorkflowMap := &appWorkflow.AppWorkflowMapping{ AppWorkflowId: pipeline.AppWorkflowId, @@ -1976,30 +1982,3 @@ func (impl *CdPipelineConfigServiceImpl) BulkDeleteCdPipelines(impactedPipelines return respDtos } - -func (impl *CdPipelineConfigServiceImpl) CreateExternalCiAndAppWorkflowMapping(appId, appWorkflowId int, userId int32, tx *pg.Tx) (int, error) { - externalCiPipeline := &pipelineConfig.ExternalCiPipeline{ - AppId: appId, - AccessToken: "", - Active: true, - AuditLog: sql.AuditLog{CreatedBy: userId, CreatedOn: time.Now(), UpdatedOn: time.Now(), UpdatedBy: userId}, - } - externalCiPipeline, err := impl.ciPipelineRepository.SaveExternalCi(externalCiPipeline, tx) - if err != nil { - 
impl.logger.Errorw("error in saving external ci", "appId", appId, "err", err) - return 0, err - } - appWorkflowMap := &appWorkflow.AppWorkflowMapping{ - AppWorkflowId: appWorkflowId, - ComponentId: externalCiPipeline.Id, - Type: "WEBHOOK", - Active: true, - AuditLog: sql.AuditLog{CreatedBy: userId, CreatedOn: time.Now(), UpdatedOn: time.Now(), UpdatedBy: userId}, - } - appWorkflowMap, err = impl.appWorkflowRepository.SaveAppWorkflowMapping(appWorkflowMap, tx) - if err != nil { - impl.logger.Errorw("error in saving app workflow mapping for external ci", "appId", appId, "appWorkflowId", appWorkflowId, "externalCiPipelineId", externalCiPipeline.Id, "err", err) - return 0, err - } - return externalCiPipeline.Id, nil -} diff --git a/pkg/pipeline/PipelineStageService.go b/pkg/pipeline/PipelineStageService.go index 4952fce7c9..56ae6d9ef3 100644 --- a/pkg/pipeline/PipelineStageService.go +++ b/pkg/pipeline/PipelineStageService.go @@ -964,12 +964,10 @@ func (impl *PipelineStageServiceImpl) UpdatePipelineStage(stageReq *bean.Pipelin if err == pg.ErrNoRows || createNewPipStage { //no stage found, creating new stage stageReq.Id = 0 - if len(stageReq.Steps) > 0 { - err = impl.CreatePipelineStage(stageReq, stageType, pipelineId, userId) - if err != nil { - impl.logger.Errorw("error in creating new pipeline stage", "err", err, "pipelineStageReq", stageReq) - return err - } + err = impl.CreatePipelineStage(stageReq, stageType, pipelineId, userId) + if err != nil { + impl.logger.Errorw("error in creating new pipeline stage", "err", err, "pipelineStageReq", stageReq) + return err } } else { //stageId found, to handle as an update request @@ -2141,7 +2139,7 @@ func (impl *PipelineStageServiceImpl) fetchScopedVariablesAndResolveTemplate(unr return nil, err } parserResponse := impl.variableTemplateParser.ParseTemplate(parsers.VariableParserRequest{ - TemplateType: parsers.StringVariableTemplate, + TemplateType: parsers.JsonVariableTemplate, Template: string(responseJson), Variables: 
scopedVariables, IgnoreUnknownVariables: true, diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index 5e1b6bc18e..adfd216bfc 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -28,6 +28,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" util2 "github.com/devtron-labs/devtron/internal/util" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/sql" "github.com/devtron-labs/devtron/util/event" @@ -70,7 +71,7 @@ type WebhookServiceImpl struct { eventFactory client.EventFactory workflowDagExecutor WorkflowDagExecutor ciHandler CiHandler - customTagService CustomTagService + customTagService pkg.CustomTagService } func NewWebhookServiceImpl( @@ -80,7 +81,7 @@ func NewWebhookServiceImpl( appService app.AppService, eventClient client.EventClient, eventFactory client.EventFactory, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, - customTagService CustomTagService, + customTagService pkg.CustomTagService, workflowDagExecutor WorkflowDagExecutor, ciHandler CiHandler) *WebhookServiceImpl { webhookHandler := &WebhookServiceImpl{ ciArtifactRepository: ciArtifactRepository, diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index b7ed0a951e..67e032cc77 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -20,45 +20,23 @@ package pipeline import ( "context" "encoding/json" - errors3 "errors" "fmt" - "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - "github.com/aws/aws-sdk-go/service/autoscaling" blob_storage 
"github.com/devtron-labs/common-lib/blob-storage" util5 "github.com/devtron-labs/common-lib/utils/k8s" "github.com/devtron-labs/common-lib/utils/k8s/health" - client2 "github.com/devtron-labs/devtron/api/helm-app" - "github.com/devtron-labs/devtron/client/argocdServer" - application2 "github.com/devtron-labs/devtron/client/argocdServer/application" gitSensorClient "github.com/devtron-labs/devtron/client/gitSensor" "github.com/devtron-labs/devtron/pkg" - "github.com/devtron-labs/devtron/internal/middleware" - app2 "github.com/devtron-labs/devtron/internal/sql/repository/app" - bean4 "github.com/devtron-labs/devtron/pkg/app/bean" "github.com/devtron-labs/devtron/pkg/app/status" - "github.com/devtron-labs/devtron/pkg/chartRepo/repository" - "github.com/devtron-labs/devtron/pkg/dockerRegistry" "github.com/devtron-labs/devtron/pkg/k8s" bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" repository4 "github.com/devtron-labs/devtron/pkg/pipeline/repository" "github.com/devtron-labs/devtron/pkg/plugin" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/variables" - "github.com/devtron-labs/devtron/pkg/variables/parsers" repository5 "github.com/devtron-labs/devtron/pkg/variables/repository" util4 "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/argo" - errors2 "github.com/juju/errors" - "github.com/pkg/errors" - "github.com/tidwall/gjson" - "github.com/tidwall/sjson" "go.opentelemetry.io/otel" - "google.golang.org/grpc/codes" - 
status2 "google.golang.org/grpc/status" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/helm/pkg/proto/hapi/chart" - "path" "strconv" "strings" "time" @@ -144,51 +122,8 @@ type WorkflowDagExecutorImpl struct { globalPluginService plugin.GlobalPluginService variableSnapshotHistoryService variables.VariableSnapshotHistoryService pluginInputVariableParser PluginInputVariableParser - - deploymentTemplateHistoryService history2.DeploymentTemplateHistoryService - configMapHistoryService history2.ConfigMapHistoryService - pipelineStrategyHistoryService history2.PipelineStrategyHistoryService - manifestPushConfigRepository repository4.ManifestPushConfigRepository - gitOpsManifestPushService app.GitOpsPushService - ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository - imageScanHistoryRepository security.ImageScanHistoryRepository - imageScanDeployInfoRepository security.ImageScanDeployInfoRepository - appCrudOperationService app.AppCrudOperationService - pipelineConfigRepository chartConfig.PipelineConfigRepository - dockerRegistryIpsConfigService dockerRegistry.DockerRegistryIpsConfigService - chartRepository chartRepoRepository.ChartRepository - chartTemplateService util.ChartTemplateService - strategyHistoryRepository repository3.PipelineStrategyHistoryRepository - appRepository app2.AppRepository - deploymentTemplateHistoryRepository repository3.DeploymentTemplateHistoryRepository - argoK8sClient argocdServer.ArgoK8sClient - configMapRepository chartConfig.ConfigMapRepository - configMapHistoryRepository repository3.ConfigMapHistoryRepository - refChartDir chartRepoRepository.RefChartDir - helmAppService client2.HelmAppService - helmAppClient client2.HelmAppClient - chartRefRepository chartRepoRepository.ChartRefRepository - environmentConfigRepository chartConfig.EnvConfigOverrideRepository - appLevelMetricsRepository repository.AppLevelMetricsRepository - envLevelMetricsRepository repository.EnvLevelAppMetricsRepository - 
dbMigrationConfigRepository pipelineConfig.DbMigrationConfigRepository - mergeUtil *util.MergeUtil - gitOpsConfigRepository repository.GitOpsConfigRepository - gitFactory *util.GitFactory - acdClient application2.ServiceClient - variableEntityMappingService variables.VariableEntityMappingService - variableTemplateParser parsers.VariableTemplateParser - argoClientWrapperService argocdServer.ArgoClientWrapperService - scopedVariableService variables.ScopedVariableService } -const kedaAutoscaling = "kedaAutoscaling" -const horizontalPodAutoscaler = "HorizontalPodAutoscaler" -const fullnameOverride = "fullnameOverride" -const nameOverride = "nameOverride" -const enabled = "enabled" -const replicaCount = "replicaCount" - const ( GIT_COMMIT_HASH_PREFIX = "GIT_COMMIT_HASH" GIT_SOURCE_TYPE_PREFIX = "GIT_SOURCE_TYPE" @@ -271,42 +206,6 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi variableSnapshotHistoryService variables.VariableSnapshotHistoryService, globalPluginService plugin.GlobalPluginService, pluginInputVariableParser PluginInputVariableParser, - - deploymentTemplateHistoryService history2.DeploymentTemplateHistoryService, - configMapHistoryService history2.ConfigMapHistoryService, - pipelineStrategyHistoryService history2.PipelineStrategyHistoryService, - manifestPushConfigRepository repository4.ManifestPushConfigRepository, - gitOpsManifestPushService app.GitOpsPushService, - ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, - imageScanHistoryRepository security.ImageScanHistoryRepository, - imageScanDeployInfoRepository security.ImageScanDeployInfoRepository, - appCrudOperationService app.AppCrudOperationService, - pipelineConfigRepository chartConfig.PipelineConfigRepository, - dockerRegistryIpsConfigService dockerRegistry.DockerRegistryIpsConfigService, - chartRepository chartRepoRepository.ChartRepository, - chartTemplateService util.ChartTemplateService, - strategyHistoryRepository 
repository3.PipelineStrategyHistoryRepository, - appRepository app2.AppRepository, - deploymentTemplateHistoryRepository repository3.DeploymentTemplateHistoryRepository, - ArgoK8sClient argocdServer.ArgoK8sClient, - configMapRepository chartConfig.ConfigMapRepository, - configMapHistoryRepository repository3.ConfigMapHistoryRepository, - refChartDir chartRepoRepository.RefChartDir, - helmAppService client2.HelmAppService, - helmAppClient client2.HelmAppClient, - chartRefRepository chartRepoRepository.ChartRefRepository, - environmentConfigRepository chartConfig.EnvConfigOverrideRepository, - appLevelMetricsRepository repository.AppLevelMetricsRepository, - envLevelMetricsRepository repository.EnvLevelAppMetricsRepository, - dbMigrationConfigRepository pipelineConfig.DbMigrationConfigRepository, - mergeUtil *util.MergeUtil, - gitOpsConfigRepository repository.GitOpsConfigRepository, - gitFactory *util.GitFactory, - acdClient application2.ServiceClient, - variableEntityMappingService variables.VariableEntityMappingService, - variableTemplateParser parsers.VariableTemplateParser, - argoClientWrapperService argocdServer.ArgoClientWrapperService, - scopedVariableService variables.ScopedVariableService, ) *WorkflowDagExecutorImpl { wde := &WorkflowDagExecutorImpl{logger: Logger, pipelineRepository: pipelineRepository, @@ -343,42 +242,6 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi variableSnapshotHistoryService: variableSnapshotHistoryService, globalPluginService: globalPluginService, pluginInputVariableParser: pluginInputVariableParser, - - deploymentTemplateHistoryService: deploymentTemplateHistoryService, - configMapHistoryService: configMapHistoryService, - pipelineStrategyHistoryService: pipelineStrategyHistoryService, - manifestPushConfigRepository: manifestPushConfigRepository, - gitOpsManifestPushService: gitOpsManifestPushService, - ciPipelineMaterialRepository: ciPipelineMaterialRepository, - imageScanHistoryRepository: 
imageScanHistoryRepository, - imageScanDeployInfoRepository: imageScanDeployInfoRepository, - appCrudOperationService: appCrudOperationService, - pipelineConfigRepository: pipelineConfigRepository, - dockerRegistryIpsConfigService: dockerRegistryIpsConfigService, - chartRepository: chartRepository, - chartTemplateService: chartTemplateService, - strategyHistoryRepository: strategyHistoryRepository, - appRepository: appRepository, - deploymentTemplateHistoryRepository: deploymentTemplateHistoryRepository, - argoK8sClient: ArgoK8sClient, - configMapRepository: configMapRepository, - configMapHistoryRepository: configMapHistoryRepository, - refChartDir: refChartDir, - helmAppService: helmAppService, - helmAppClient: helmAppClient, - chartRefRepository: chartRefRepository, - environmentConfigRepository: environmentConfigRepository, - appLevelMetricsRepository: appLevelMetricsRepository, - envLevelMetricsRepository: envLevelMetricsRepository, - dbMigrationConfigRepository: dbMigrationConfigRepository, - mergeUtil: mergeUtil, - gitOpsConfigRepository: gitOpsConfigRepository, - gitFactory: gitFactory, - acdClient: acdClient, - variableEntityMappingService: variableEntityMappingService, - variableTemplateParser: variableTemplateParser, - argoClientWrapperService: argoClientWrapperService, - scopedVariableService: scopedVariableService, } config, err := GetCdConfig() if err != nil { @@ -1098,7 +961,6 @@ func (impl *WorkflowDagExecutorImpl) buildWFRequest(runner *pipelineConfig.CdWor impl.logger.Errorw("error in getting environment by id", "err", err) return nil, err } - if pipelineStage != nil { //Scope will pick the environment of CD pipeline irrespective of in-cluster mode, //since user sees the environment of the CD pipeline @@ -1110,8 +972,7 @@ func (impl *WorkflowDagExecutorImpl) buildWFRequest(runner *pipelineConfig.CdWor EnvironmentName: env.Name, ClusterName: env.Cluster.ClusterName, Namespace: env.Namespace, - Image: artifact.Image, - ImageTag: 
util3.GetImageTagFromImage(artifact.Image), + ImageTag: artifact.Image, }, } var variableSnapshot map[string]string @@ -1662,7 +1523,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerDeployment(cdWf *pipelineConfig.CdWo return nil } - err = impl.TriggerCD(artifact, cdWf.Id, savedWfr.Id, pipeline, triggeredAt) + err = impl.appService.TriggerCD(artifact, cdWf.Id, savedWfr.Id, pipeline, triggeredAt) err1 := impl.updatePreviousDeploymentStatus(runner, pipeline.Id, err, triggeredAt, triggeredBy) if err1 != nil || err != nil { impl.logger.Errorw("error while update previous cd workflow runners", "err", err, "runner", runner, "pipelineId", pipeline.Id) @@ -1935,7 +1796,7 @@ func (impl *WorkflowDagExecutorImpl) ManualCdTrigger(overrideRequest *bean.Value impl.logger.Errorf("invalid req", "err", err, "req", overrideRequest) return 0, err } - impl.SetPipelineFieldsInOverrideRequest(overrideRequest, cdPipeline) + impl.appService.SetPipelineFieldsInOverrideRequest(overrideRequest, cdPipeline) if overrideRequest.CdWorkflowType == bean.CD_WORKFLOW_TYPE_PRE { _, span = otel.Tracer("orchestrator").Start(ctx, "ciArtifactRepository.Get") @@ -2062,7 +1923,7 @@ func (impl *WorkflowDagExecutorImpl) ManualCdTrigger(overrideRequest *bean.Value return 0, fmt.Errorf("found vulnerability for image digest %s", artifact.ImageDigest) } _, span = otel.Tracer("orchestrator").Start(ctx, "appService.TriggerRelease") - releaseId, _, err = impl.TriggerRelease(overrideRequest, ctx, triggeredAt, overrideRequest.UserId) + releaseId, _, err = impl.appService.TriggerRelease(overrideRequest, ctx, triggeredAt, overrideRequest.UserId) span.End() if overrideRequest.DeploymentAppType == util.PIPELINE_DEPLOYMENT_TYPE_MANIFEST_DOWNLOAD { @@ -2321,1861 +2182,3 @@ func (impl *WorkflowDagExecutorImpl) buildACDContext() (acdContext context.Conte ctx = context.WithValue(ctx, "token", acdToken) return ctx, nil } - -func (impl *WorkflowDagExecutorImpl) TriggerRelease(overrideRequest *bean.ValuesOverrideRequest, ctx 
context.Context, triggeredAt time.Time, deployedBy int32) (releaseNo int, manifest []byte, err error) { - triggerEvent := impl.GetTriggerEvent(overrideRequest.DeploymentAppType, triggeredAt, deployedBy) - releaseNo, manifest, err = impl.TriggerPipeline(overrideRequest, triggerEvent, ctx) - if err != nil { - return 0, manifest, err - } - return releaseNo, manifest, nil -} - -func (impl *WorkflowDagExecutorImpl) TriggerCD(artifact *repository.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, triggeredAt time.Time) error { - impl.logger.Debugw("automatic pipeline trigger attempt async", "artifactId", artifact.Id) - - return impl.triggerReleaseAsync(artifact, cdWorkflowId, wfrId, pipeline, triggeredAt) -} - -func (impl *WorkflowDagExecutorImpl) triggerReleaseAsync(artifact *repository.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, triggeredAt time.Time) error { - err := impl.validateAndTrigger(pipeline, artifact, cdWorkflowId, wfrId, triggeredAt) - if err != nil { - impl.logger.Errorw("error in trigger for pipeline", "pipelineId", strconv.Itoa(pipeline.Id)) - } - impl.logger.Debugw("trigger attempted for all pipeline ", "artifactId", artifact.Id) - return err -} - -func (impl *WorkflowDagExecutorImpl) validateAndTrigger(p *pipelineConfig.Pipeline, artifact *repository.CiArtifact, cdWorkflowId, wfrId int, triggeredAt time.Time) error { - object := impl.enforcerUtil.GetAppRBACNameByAppId(p.AppId) - envApp := strings.Split(object, "/") - if len(envApp) != 2 { - impl.logger.Error("invalid req, app and env not found from rbac") - return errors.New("invalid req, app and env not found from rbac") - } - err := impl.releasePipeline(p, artifact, cdWorkflowId, wfrId, triggeredAt) - return err -} - -func (impl *WorkflowDagExecutorImpl) releasePipeline(pipeline *pipelineConfig.Pipeline, artifact *repository.CiArtifact, cdWorkflowId, wfrId int, triggeredAt time.Time) error { - impl.logger.Debugw("triggering release for ", 
"cdPipelineId", pipeline.Id, "artifactId", artifact.Id) - - pipeline, err := impl.pipelineRepository.FindById(pipeline.Id) - if err != nil { - impl.logger.Errorw("error in fetching pipeline by pipelineId", "err", err) - return err - } - - request := &bean.ValuesOverrideRequest{ - PipelineId: pipeline.Id, - UserId: artifact.CreatedBy, - CiArtifactId: artifact.Id, - AppId: pipeline.AppId, - CdWorkflowId: cdWorkflowId, - ForceTrigger: true, - DeploymentWithConfig: bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED, - WfrId: wfrId, - } - impl.SetPipelineFieldsInOverrideRequest(request, pipeline) - - ctx, err := impl.buildACDContext() - if err != nil { - impl.logger.Errorw("error in creating acd synch context", "pipelineId", pipeline.Id, "artifactId", artifact.Id, "err", err) - return err - } - //setting deployedBy as 1(system user) since case of auto trigger - id, _, err := impl.TriggerRelease(request, ctx, triggeredAt, 1) - if err != nil { - impl.logger.Errorw("error in auto cd pipeline trigger", "pipelineId", pipeline.Id, "artifactId", artifact.Id, "err", err) - } else { - impl.logger.Infow("pipeline successfully triggered ", "cdPipelineId", pipeline.Id, "artifactId", artifact.Id, "releaseId", id) - } - return err - -} - -func (impl *WorkflowDagExecutorImpl) SetPipelineFieldsInOverrideRequest(overrideRequest *bean.ValuesOverrideRequest, pipeline *pipelineConfig.Pipeline) { - overrideRequest.PipelineId = pipeline.Id - overrideRequest.PipelineName = pipeline.Name - overrideRequest.EnvId = pipeline.EnvironmentId - overrideRequest.EnvName = pipeline.Environment.Name - overrideRequest.ClusterId = pipeline.Environment.ClusterId - overrideRequest.AppId = pipeline.AppId - overrideRequest.AppName = pipeline.App.AppName - overrideRequest.DeploymentAppType = pipeline.DeploymentAppType -} - -func (impl *WorkflowDagExecutorImpl) GetTriggerEvent(deploymentAppType string, triggeredAt time.Time, deployedBy int32) bean.TriggerEvent { - // trigger event will decide whether to perform GitOps or 
deployment for a particular deployment app type - triggerEvent := bean.TriggerEvent{ - TriggeredBy: deployedBy, - TriggerdAt: triggeredAt, - } - switch deploymentAppType { - case bean2.ArgoCd: - triggerEvent.PerformChartPush = true - triggerEvent.PerformDeploymentOnCluster = true - triggerEvent.GetManifestInResponse = false - triggerEvent.DeploymentAppType = bean2.ArgoCd - triggerEvent.ManifestStorageType = bean2.ManifestStorageGit - case bean2.Helm: - triggerEvent.PerformChartPush = false - triggerEvent.PerformDeploymentOnCluster = true - triggerEvent.GetManifestInResponse = false - triggerEvent.DeploymentAppType = bean2.Helm - } - return triggerEvent -} - -// write integration/unit test for each function -func (impl *WorkflowDagExecutorImpl) TriggerPipeline(overrideRequest *bean.ValuesOverrideRequest, triggerEvent bean.TriggerEvent, ctx context.Context) (releaseNo int, manifest []byte, err error) { - - isRequestValid, err := impl.ValidateTriggerEvent(triggerEvent) - if !isRequestValid { - return releaseNo, manifest, err - } - - valuesOverrideResponse, builtChartPath, err := impl.BuildManifestForTrigger(overrideRequest, triggerEvent.TriggerdAt, ctx) - _, span := otel.Tracer("orchestrator").Start(ctx, "CreateHistoriesForDeploymentTrigger") - err1 := impl.CreateHistoriesForDeploymentTrigger(valuesOverrideResponse.Pipeline, valuesOverrideResponse.PipelineStrategy, valuesOverrideResponse.EnvOverride, triggerEvent.TriggerdAt, triggerEvent.TriggeredBy) - if err1 != nil { - impl.logger.Errorw("error in saving histories for trigger", "err", err1, "pipelineId", valuesOverrideResponse.Pipeline.Id, "wfrId", overrideRequest.WfrId) - } - span.End() - if err != nil { - return releaseNo, manifest, err - } - - if triggerEvent.PerformChartPush { - manifestPushTemplate, err := impl.BuildManifestPushTemplate(overrideRequest, valuesOverrideResponse, builtChartPath, &manifest) - if err != nil { - impl.logger.Errorw("error in building manifest push template", "err", err) - return 
releaseNo, manifest, err - } - manifestPushService := impl.GetManifestPushService(triggerEvent) - manifestPushResponse := manifestPushService.PushChart(manifestPushTemplate, ctx) - if manifestPushResponse.Error != nil { - impl.logger.Errorw("Error in pushing manifest to git", "err", err, "git_repo_url", manifestPushTemplate.RepoUrl) - return releaseNo, manifest, err - } - pipelineOverrideUpdateRequest := &chartConfig.PipelineOverride{ - Id: valuesOverrideResponse.PipelineOverride.Id, - GitHash: manifestPushResponse.CommitHash, - CommitTime: manifestPushResponse.CommitTime, - EnvConfigOverrideId: valuesOverrideResponse.EnvOverride.Id, - PipelineOverrideValues: valuesOverrideResponse.ReleaseOverrideJSON, - PipelineId: overrideRequest.PipelineId, - CiArtifactId: overrideRequest.CiArtifactId, - PipelineMergedValues: valuesOverrideResponse.MergedValues, - AuditLog: sql.AuditLog{UpdatedOn: triggerEvent.TriggerdAt, UpdatedBy: overrideRequest.UserId}, - } - _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineOverrideRepository.Update") - err = impl.pipelineOverrideRepository.Update(pipelineOverrideUpdateRequest) - span.End() - } - - if triggerEvent.PerformDeploymentOnCluster { - err = impl.DeployApp(overrideRequest, valuesOverrideResponse, triggerEvent.TriggerdAt, ctx) - if err != nil { - impl.logger.Errorw("error in deploying app", "err", err) - return releaseNo, manifest, err - } - } - - go impl.WriteCDTriggerEvent(overrideRequest, valuesOverrideResponse.Artifact, valuesOverrideResponse.PipelineOverride.PipelineReleaseCounter, valuesOverrideResponse.PipelineOverride.Id) - - _, spann := otel.Tracer("orchestrator").Start(ctx, "MarkImageScanDeployed") - _ = impl.MarkImageScanDeployed(overrideRequest.AppId, valuesOverrideResponse.EnvOverride.TargetEnvironment, valuesOverrideResponse.Artifact.ImageDigest, overrideRequest.ClusterId, valuesOverrideResponse.Artifact.ScanEnabled) - spann.End() - - middleware.CdTriggerCounter.WithLabelValues(overrideRequest.AppName, 
overrideRequest.EnvName).Inc() - - return valuesOverrideResponse.PipelineOverride.PipelineReleaseCounter, manifest, nil - -} - -func (impl *WorkflowDagExecutorImpl) ValidateTriggerEvent(triggerEvent bean.TriggerEvent) (bool, error) { - - switch triggerEvent.DeploymentAppType { - case bean2.ArgoCd: - if !triggerEvent.PerformChartPush { - return false, errors2.New("For deployment type ArgoCd, PerformChartPush flag expected value = true, got false") - } - case bean2.Helm: - return true, nil - case bean2.GitOpsWithoutDeployment: - if triggerEvent.PerformDeploymentOnCluster { - return false, errors2.New("For deployment type GitOpsWithoutDeployment, PerformDeploymentOnCluster flag expected value = false, got value = true") - } - case bean2.ManifestDownload: - if triggerEvent.PerformChartPush { - return false, errors3.New("For deployment type ManifestDownload, PerformChartPush flag expected value = false, got true") - } - if triggerEvent.PerformDeploymentOnCluster { - return false, errors3.New("For deployment type ManifestDownload, PerformDeploymentOnCluster flag expected value = false, got true") - } - } - return true, nil - -} - -func (impl *WorkflowDagExecutorImpl) BuildManifestForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (valuesOverrideResponse *app.ValuesOverrideResponse, builtChartPath string, err error) { - - valuesOverrideResponse = &app.ValuesOverrideResponse{} - valuesOverrideResponse, err = impl.GetValuesOverrideForTrigger(overrideRequest, triggeredAt, ctx) - if err != nil { - impl.logger.Errorw("error in fetching values for trigger", "err", err) - return valuesOverrideResponse, "", err - } - builtChartPath, err = impl.appService.BuildChartAndGetPath(overrideRequest.AppName, valuesOverrideResponse.EnvOverride, ctx) - if err != nil { - impl.logger.Errorw("error in parsing reference chart", "err", err) - return valuesOverrideResponse, "", err - } - return valuesOverrideResponse, builtChartPath, err -} - -func 
(impl *WorkflowDagExecutorImpl) CreateHistoriesForDeploymentTrigger(pipeline *pipelineConfig.Pipeline, strategy *chartConfig.PipelineStrategy, envOverride *chartConfig.EnvConfigOverride, deployedOn time.Time, deployedBy int32) error { - //creating history for deployment template - deploymentTemplateHistory, err := impl.deploymentTemplateHistoryService.CreateDeploymentTemplateHistoryForDeploymentTrigger(pipeline, envOverride, envOverride.Chart.ImageDescriptorTemplate, deployedOn, deployedBy) - if err != nil { - impl.logger.Errorw("error in creating deployment template history for deployment trigger", "err", err) - return err - } - err = impl.configMapHistoryService.CreateCMCSHistoryForDeploymentTrigger(pipeline, deployedOn, deployedBy) - if err != nil { - impl.logger.Errorw("error in creating CM/CS history for deployment trigger", "err", err) - return err - } - if strategy != nil { - err = impl.pipelineStrategyHistoryService.CreateStrategyHistoryForDeploymentTrigger(strategy, deployedOn, deployedBy, pipeline.TriggerType) - if err != nil { - impl.logger.Errorw("error in creating strategy history for deployment trigger", "err", err) - return err - } - } - //VARIABLE_SNAPSHOT_SAVE - if envOverride.VariableSnapshot != nil && len(envOverride.VariableSnapshot) > 0 { - variableMapBytes, _ := json.Marshal(envOverride.VariableSnapshot) - variableSnapshotHistory := &repository5.VariableSnapshotHistoryBean{ - VariableSnapshot: variableMapBytes, - HistoryReference: repository5.HistoryReference{ - HistoryReferenceId: deploymentTemplateHistory.Id, - HistoryReferenceType: repository5.HistoryReferenceTypeDeploymentTemplate, - }, - } - err = impl.variableSnapshotHistoryService.SaveVariableHistoriesForTrigger([]*repository5.VariableSnapshotHistoryBean{variableSnapshotHistory}, deployedBy) - if err != nil { - return err - } - } - return nil -} - -func (impl *WorkflowDagExecutorImpl) BuildManifestPushTemplate(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse 
*app.ValuesOverrideResponse, builtChartPath string, manifest *[]byte) (*bean4.ManifestPushTemplate, error) { - - manifestPushTemplate := &bean4.ManifestPushTemplate{ - WorkflowRunnerId: overrideRequest.WfrId, - AppId: overrideRequest.AppId, - ChartRefId: valuesOverrideResponse.EnvOverride.Chart.ChartRefId, - EnvironmentId: valuesOverrideResponse.EnvOverride.Environment.Id, - UserId: overrideRequest.UserId, - PipelineOverrideId: valuesOverrideResponse.PipelineOverride.Id, - AppName: overrideRequest.AppName, - TargetEnvironmentName: valuesOverrideResponse.EnvOverride.TargetEnvironment, - BuiltChartPath: builtChartPath, - BuiltChartBytes: manifest, - MergedValues: valuesOverrideResponse.MergedValues, - } - - manifestPushConfig, err := impl.manifestPushConfigRepository.GetManifestPushConfigByAppIdAndEnvId(overrideRequest.AppId, overrideRequest.EnvId) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in fetching manifest push config from db", "err", err) - return manifestPushTemplate, err - } - - if manifestPushConfig != nil { - if manifestPushConfig.StorageType == bean2.ManifestStorageGit { - // need to implement for git repo push - // currently manifest push config doesn't have git push config. 
Gitops config is derived from charts, chart_env_config_override and chart_ref table - } - } else { - manifestPushTemplate.ChartReferenceTemplate = valuesOverrideResponse.EnvOverride.Chart.ReferenceTemplate - manifestPushTemplate.ChartName = valuesOverrideResponse.EnvOverride.Chart.ChartName - manifestPushTemplate.ChartVersion = valuesOverrideResponse.EnvOverride.Chart.ChartVersion - manifestPushTemplate.ChartLocation = valuesOverrideResponse.EnvOverride.Chart.ChartLocation - manifestPushTemplate.RepoUrl = valuesOverrideResponse.EnvOverride.Chart.GitRepoUrl - } - return manifestPushTemplate, err -} - -func (impl *WorkflowDagExecutorImpl) GetManifestPushService(triggerEvent bean.TriggerEvent) app.ManifestPushService { - var manifestPushService app.ManifestPushService - if triggerEvent.ManifestStorageType == bean2.ManifestStorageGit { - manifestPushService = impl.gitOpsManifestPushService - } - return manifestPushService -} - -func (impl *WorkflowDagExecutorImpl) DeployApp(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *app.ValuesOverrideResponse, triggeredAt time.Time, ctx context.Context) error { - - if util.IsAcdApp(overrideRequest.DeploymentAppType) { - _, span := otel.Tracer("orchestrator").Start(ctx, "DeployArgocdApp") - err := impl.DeployArgocdApp(overrideRequest, valuesOverrideResponse, ctx) - span.End() - if err != nil { - impl.logger.Errorw("error in deploying app on argocd", "err", err) - return err - } - } else if util.IsHelmApp(overrideRequest.DeploymentAppType) { - _, span := otel.Tracer("orchestrator").Start(ctx, "createHelmAppForCdPipeline") - _, err := impl.createHelmAppForCdPipeline(overrideRequest, valuesOverrideResponse, triggeredAt, ctx) - span.End() - if err != nil { - impl.logger.Errorw("error in creating or updating helm application for cd pipeline", "err", err) - return err - } - } - return nil -} - -func (impl *WorkflowDagExecutorImpl) WriteCDTriggerEvent(overrideRequest *bean.ValuesOverrideRequest, artifact 
*repository.CiArtifact, releaseId, pipelineOverrideId int) { - - event := impl.eventFactory.Build(util2.Trigger, &overrideRequest.PipelineId, overrideRequest.AppId, &overrideRequest.EnvId, util2.CD) - impl.logger.Debugw("event WriteCDTriggerEvent", "event", event) - event = impl.eventFactory.BuildExtraCDData(event, nil, pipelineOverrideId, bean.CD_WORKFLOW_TYPE_DEPLOY) - _, evtErr := impl.eventClient.WriteNotificationEvent(event) - if evtErr != nil { - impl.logger.Errorw("CD trigger event not sent", "error", evtErr) - } - deploymentEvent := app.DeploymentEvent{ - ApplicationId: overrideRequest.AppId, - EnvironmentId: overrideRequest.EnvId, //check for production Environment - ReleaseId: releaseId, - PipelineOverrideId: pipelineOverrideId, - TriggerTime: time.Now(), - CiArtifactId: overrideRequest.CiArtifactId, - } - ciPipelineMaterials, err := impl.ciPipelineMaterialRepository.GetByPipelineId(artifact.PipelineId) - if err != nil { - impl.logger.Errorw("error in ") - } - materialInfoMap, mErr := artifact.ParseMaterialInfo() - if mErr != nil { - impl.logger.Errorw("material info map error", mErr) - return - } - for _, ciPipelineMaterial := range ciPipelineMaterials { - hash := materialInfoMap[ciPipelineMaterial.GitMaterial.Url] - pipelineMaterialInfo := &app.PipelineMaterialInfo{PipelineMaterialId: ciPipelineMaterial.Id, CommitHash: hash} - deploymentEvent.PipelineMaterials = append(deploymentEvent.PipelineMaterials, pipelineMaterialInfo) - } - impl.logger.Infow("triggering deployment event", "event", deploymentEvent) - err = impl.eventClient.WriteNatsEvent(pubsub.CD_SUCCESS, deploymentEvent) - if err != nil { - impl.logger.Errorw("error in writing cd trigger event", "err", err) - } -} - -func (impl *WorkflowDagExecutorImpl) MarkImageScanDeployed(appId int, envId int, imageDigest string, clusterId int, isScanEnabled bool) error { - impl.logger.Debugw("mark image scan deployed for normal app, from cd auto or manual trigger", "imageDigest", imageDigest) - 
executionHistory, err := impl.imageScanHistoryRepository.FindByImageDigest(imageDigest) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in fetching execution history", "err", err) - return err - } - if executionHistory == nil || executionHistory.Id == 0 { - impl.logger.Errorw("no execution history found for digest", "digest", imageDigest) - return fmt.Errorf("no execution history found for digest - %s", imageDigest) - } - impl.logger.Debugw("mark image scan deployed for normal app, from cd auto or manual trigger", "executionHistory", executionHistory) - var ids []int - ids = append(ids, executionHistory.Id) - - ot, err := impl.imageScanDeployInfoRepository.FetchByAppIdAndEnvId(appId, envId, []string{security.ScanObjectType_APP}) - - if err == pg.ErrNoRows && !isScanEnabled { - //ignoring if no rows are found and scan is disabled - return nil - } - - if err != nil && err != pg.ErrNoRows { - return err - } else if err == pg.ErrNoRows && isScanEnabled { - imageScanDeployInfo := &security.ImageScanDeployInfo{ - ImageScanExecutionHistoryId: ids, - ScanObjectMetaId: appId, - ObjectType: security.ScanObjectType_APP, - EnvId: envId, - ClusterId: clusterId, - AuditLog: sql.AuditLog{ - CreatedOn: time.Now(), - CreatedBy: 1, - UpdatedOn: time.Now(), - UpdatedBy: 1, - }, - } - impl.logger.Debugw("mark image scan deployed for normal app, from cd auto or manual trigger", "imageScanDeployInfo", imageScanDeployInfo) - err = impl.imageScanDeployInfoRepository.Save(imageScanDeployInfo) - if err != nil { - impl.logger.Errorw("error in creating deploy info", "err", err) - } - } else { - // Updating Execution history for Latest Deployment to fetch out security Vulnerabilities for latest deployed info - if isScanEnabled { - ot.ImageScanExecutionHistoryId = ids - } else { - arr := []int{-1} - ot.ImageScanExecutionHistoryId = arr - } - err = impl.imageScanDeployInfoRepository.Update(ot) - if err != nil { - impl.logger.Errorw("error in updating deploy info for latest 
deployed image", "err", err) - } - } - return err -} - -func (impl *WorkflowDagExecutorImpl) GetValuesOverrideForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*app.ValuesOverrideResponse, error) { - if overrideRequest.DeploymentType == models.DEPLOYMENTTYPE_UNKNOWN { - overrideRequest.DeploymentType = models.DEPLOYMENTTYPE_DEPLOY - } - if len(overrideRequest.DeploymentWithConfig) == 0 { - overrideRequest.DeploymentWithConfig = bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED - } - valuesOverrideResponse := &app.ValuesOverrideResponse{} - - pipeline, err := impl.pipelineRepository.FindById(overrideRequest.PipelineId) - valuesOverrideResponse.Pipeline = pipeline - if err != nil { - impl.logger.Errorw("error in fetching pipeline by pipeline id", "err", err, "pipeline-id-", overrideRequest.PipelineId) - return valuesOverrideResponse, err - } - - _, span := otel.Tracer("orchestrator").Start(ctx, "ciArtifactRepository.Get") - artifact, err := impl.ciArtifactRepository.Get(overrideRequest.CiArtifactId) - valuesOverrideResponse.Artifact = artifact - span.End() - if err != nil { - return valuesOverrideResponse, err - } - overrideRequest.Image = artifact.Image - - strategy, err := impl.GetDeploymentStrategyByTriggerType(overrideRequest, ctx) - valuesOverrideResponse.PipelineStrategy = strategy - if err != nil { - impl.logger.Errorw("error in getting strategy by trigger type", "err", err) - return valuesOverrideResponse, err - } - - envOverride, err := impl.GetEnvOverrideByTriggerType(overrideRequest, triggeredAt, ctx) - valuesOverrideResponse.EnvOverride = envOverride - if err != nil { - impl.logger.Errorw("error in getting env override by trigger type", "err", err) - return valuesOverrideResponse, err - } - appMetrics, err := impl.GetAppMetricsByTriggerType(overrideRequest, ctx) - valuesOverrideResponse.AppMetrics = appMetrics - if err != nil { - impl.logger.Errorw("error in getting app metrics by trigger type", "err", err) - return 
valuesOverrideResponse, err - } - - _, span = otel.Tracer("orchestrator").Start(ctx, "getDbMigrationOverride") - //FIXME: how to determine rollback - //we can't depend on ciArtifact ID because CI pipeline can be manually triggered in any order regardless of sourcecode status - dbMigrationOverride, err := impl.getDbMigrationOverride(overrideRequest, artifact, false) - span.End() - if err != nil { - impl.logger.Errorw("error in fetching db migration config", "req", overrideRequest, "err", err) - return valuesOverrideResponse, err - } - chartVersion := envOverride.Chart.ChartVersion - _, span = otel.Tracer("orchestrator").Start(ctx, "getConfigMapAndSecretJsonV2") - configMapJson, err := impl.getConfigMapAndSecretJsonV2(overrideRequest.AppId, envOverride.TargetEnvironment, overrideRequest.PipelineId, chartVersion, overrideRequest.DeploymentWithConfig, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - span.End() - if err != nil { - impl.logger.Errorw("error in fetching config map n secret ", "err", err) - configMapJson = nil - } - _, span = otel.Tracer("orchestrator").Start(ctx, "appCrudOperationService.GetLabelsByAppIdForDeployment") - appLabelJsonByte, err := impl.appCrudOperationService.GetLabelsByAppIdForDeployment(overrideRequest.AppId) - span.End() - if err != nil { - impl.logger.Errorw("error in fetching app labels for gitOps commit", "err", err) - appLabelJsonByte = nil - } - _, span = otel.Tracer("orchestrator").Start(ctx, "mergeAndSave") - pipelineOverride, err := impl.savePipelineOverride(overrideRequest, envOverride.Id, triggeredAt) - valuesOverrideResponse.PipelineOverride = pipelineOverride - if err != nil { - return valuesOverrideResponse, err - } - //TODO: check status and apply lock - releaseOverrideJson, err := impl.getReleaseOverride(envOverride, overrideRequest, artifact, pipelineOverride, strategy, &appMetrics) - valuesOverrideResponse.ReleaseOverrideJSON = releaseOverrideJson - if err != nil { - return valuesOverrideResponse, err - } - 
mergedValues, err := impl.mergeOverrideValues(envOverride, dbMigrationOverride, releaseOverrideJson, configMapJson, appLabelJsonByte, strategy) - - appName := fmt.Sprintf("%s-%s", overrideRequest.AppName, envOverride.Environment.Name) - mergedValues = impl.autoscalingCheckBeforeTrigger(ctx, appName, envOverride.Namespace, mergedValues, overrideRequest) - - _, span = otel.Tracer("orchestrator").Start(ctx, "dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment") - // handle image pull secret if access given - mergedValues, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(envOverride.Environment, pipeline.CiPipelineId, mergedValues) - valuesOverrideResponse.MergedValues = string(mergedValues) - span.End() - if err != nil { - return valuesOverrideResponse, err - } - pipelineOverride.PipelineMergedValues = string(mergedValues) - err = impl.pipelineOverrideRepository.Update(pipelineOverride) - if err != nil { - return valuesOverrideResponse, err - } - return valuesOverrideResponse, err -} - -func (impl *WorkflowDagExecutorImpl) DeployArgocdApp(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *app.ValuesOverrideResponse, ctx context.Context) error { - - impl.logger.Debugw("new pipeline found", "pipeline", valuesOverrideResponse.Pipeline) - _, span := otel.Tracer("orchestrator").Start(ctx, "createArgoApplicationIfRequired") - name, err := impl.createArgoApplicationIfRequired(overrideRequest.AppId, valuesOverrideResponse.EnvOverride, valuesOverrideResponse.Pipeline, overrideRequest.UserId) - span.End() - if err != nil { - impl.logger.Errorw("acd application create error on cd trigger", "err", err, "req", overrideRequest) - return err - } - impl.logger.Debugw("argocd application created", "name", name) - - _, span = otel.Tracer("orchestrator").Start(ctx, "updateArgoPipeline") - updateAppInArgocd, err := impl.updateArgoPipeline(overrideRequest.AppId, valuesOverrideResponse.Pipeline.Name, 
valuesOverrideResponse.EnvOverride, ctx) - span.End() - if err != nil { - impl.logger.Errorw("error in updating argocd app ", "err", err) - return err - } - if updateAppInArgocd { - impl.logger.Debug("argo-cd successfully updated") - } else { - impl.logger.Debug("argo-cd failed to update, ignoring it") - } - return nil -} -func (impl *WorkflowDagExecutorImpl) createArgoApplicationIfRequired(appId int, envConfigOverride *chartConfig.EnvConfigOverride, pipeline *pipelineConfig.Pipeline, userId int32) (string, error) { - //repo has been registered while helm create - chart, err := impl.chartRepository.FindLatestChartForAppByAppId(appId) - if err != nil { - impl.logger.Errorw("no chart found ", "app", appId) - return "", err - } - envModel, err := impl.envRepository.FindById(envConfigOverride.TargetEnvironment) - if err != nil { - return "", err - } - argoAppName := pipeline.DeploymentAppName - if pipeline.DeploymentAppCreated { - return argoAppName, nil - } else { - //create - appNamespace := envConfigOverride.Namespace - if appNamespace == "" { - appNamespace = "default" - } - namespace := argocdServer.DevtronInstalationNs - appRequest := &argocdServer.AppTemplate{ - ApplicationName: argoAppName, - Namespace: namespace, - TargetNamespace: appNamespace, - TargetServer: envModel.Cluster.ServerUrl, - Project: "default", - ValuesFile: impl.getValuesFileForEnv(envModel.Id), - RepoPath: chart.ChartLocation, - RepoUrl: chart.GitRepoUrl, - } - - argoAppName, err := impl.argoK8sClient.CreateAcdApp(appRequest, envModel.Cluster) - if err != nil { - return "", err - } - //update cd pipeline to mark deployment app created - _, err = impl.updatePipeline(pipeline, userId) - if err != nil { - impl.logger.Errorw("error in update cd pipeline for deployment app created or not", "err", err) - return "", err - } - return argoAppName, nil - } -} - -func (impl *WorkflowDagExecutorImpl) createHelmAppForCdPipeline(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse 
*app.ValuesOverrideResponse, triggeredAt time.Time, ctx context.Context) (bool, error) { - - pipeline := valuesOverrideResponse.Pipeline - envOverride := valuesOverrideResponse.EnvOverride - mergeAndSave := valuesOverrideResponse.MergedValues - - chartMetaData := &chart.Metadata{ - Name: pipeline.App.AppName, - Version: envOverride.Chart.ChartVersion, - } - referenceTemplatePath := path.Join(string(impl.refChartDir), envOverride.Chart.ReferenceTemplate) - - if util.IsHelmApp(pipeline.DeploymentAppType) { - referenceChartByte := envOverride.Chart.ReferenceChart - // here updating reference chart into database. - if len(envOverride.Chart.ReferenceChart) == 0 { - refChartByte, err := impl.chartTemplateService.GetByteArrayRefChart(chartMetaData, referenceTemplatePath) - if err != nil { - impl.logger.Errorw("ref chart commit error on cd trigger", "err", err, "req", overrideRequest) - return false, err - } - ch := envOverride.Chart - ch.ReferenceChart = refChartByte - ch.UpdatedOn = time.Now() - ch.UpdatedBy = overrideRequest.UserId - err = impl.chartRepository.Update(ch) - if err != nil { - impl.logger.Errorw("chart update error", "err", err, "req", overrideRequest) - return false, err - } - referenceChartByte = refChartByte - } - - releaseName := pipeline.DeploymentAppName - cluster := envOverride.Environment.Cluster - bearerToken := cluster.Config[util5.BearerToken] - clusterConfig := &client2.ClusterConfig{ - ClusterName: cluster.ClusterName, - Token: bearerToken, - ApiServerUrl: cluster.ServerUrl, - InsecureSkipTLSVerify: cluster.InsecureSkipTlsVerify, - } - if cluster.InsecureSkipTlsVerify == false { - clusterConfig.KeyData = cluster.Config[util5.TlsKey] - clusterConfig.CertData = cluster.Config[util5.CertData] - clusterConfig.CaData = cluster.Config[util5.CertificateAuthorityData] - } - releaseIdentifier := &client2.ReleaseIdentifier{ - ReleaseName: releaseName, - ReleaseNamespace: envOverride.Namespace, - ClusterConfig: clusterConfig, - } - - if 
pipeline.DeploymentAppCreated { - req := &client2.UpgradeReleaseRequest{ - ReleaseIdentifier: releaseIdentifier, - ValuesYaml: mergeAndSave, - HistoryMax: impl.helmAppService.GetRevisionHistoryMaxValue(client2.SOURCE_DEVTRON_APP), - ChartContent: &client2.ChartContent{Content: referenceChartByte}, - } - - updateApplicationResponse, err := impl.helmAppClient.UpdateApplication(ctx, req) - - // For cases where helm release was not found but db flag for deployment app created was true - if err != nil && strings.Contains(err.Error(), "release: not found") { - - // retry install - _, err = impl.helmInstallReleaseWithCustomChart(ctx, releaseIdentifier, referenceChartByte, mergeAndSave) - - // if retry failed, return - if err != nil { - impl.logger.Errorw("release not found, failed to re-install helm application", "err", err) - return false, err - } - } else if err != nil { - impl.logger.Errorw("error in updating helm application for cd pipeline", "err", err) - return false, err - } else { - impl.logger.Debugw("updated helm application", "response", updateApplicationResponse, "isSuccess", updateApplicationResponse.Success) - } - - } else { - - helmResponse, err := impl.helmInstallReleaseWithCustomChart(ctx, releaseIdentifier, referenceChartByte, mergeAndSave) - - // For connection related errors, no need to update the db - if err != nil && strings.Contains(err.Error(), "connection error") { - impl.logger.Errorw("error in helm install custom chart", "err", err) - return false, err - } - - // IMP: update cd pipeline to mark deployment app created, even if helm install fails - // If the helm install fails, it still creates the app in failed state, so trying to - // re-create the app results in error from helm that cannot re-use name which is still in use - _, pgErr := impl.updatePipeline(pipeline, overrideRequest.UserId) - - if err != nil { - impl.logger.Errorw("error in helm install custom chart", "err", err) - - if pgErr != nil { - impl.logger.Errorw("failed to update 
deployment app created flag in pipeline table", "err", err) - } - return false, err - } - - if pgErr != nil { - impl.logger.Errorw("failed to update deployment app created flag in pipeline table", "err", err) - return false, err - } - - impl.logger.Debugw("received helm release response", "helmResponse", helmResponse, "isSuccess", helmResponse.Success) - } - - //update workflow runner status, used in app workflow view - cdWf, err := impl.cdWorkflowRepository.FindByWorkflowIdAndRunnerType(ctx, overrideRequest.CdWorkflowId, bean.CD_WORKFLOW_TYPE_DEPLOY) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("err on fetching cd workflow", "err", err) - return false, err - } - cdWorkflowId := cdWf.CdWorkflowId - if cdWf.CdWorkflowId == 0 { - cdWf := &pipelineConfig.CdWorkflow{ - CiArtifactId: overrideRequest.CiArtifactId, - PipelineId: overrideRequest.PipelineId, - AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, - } - err := impl.cdWorkflowRepository.SaveWorkFlow(ctx, cdWf) - if err != nil { - impl.logger.Errorw("err on updating cd workflow for status update", "err", err) - return false, err - } - cdWorkflowId = cdWf.Id - runner := &pipelineConfig.CdWorkflowRunner{ - Id: cdWf.Id, - Name: pipeline.Name, - WorkflowType: bean.CD_WORKFLOW_TYPE_DEPLOY, - ExecutorType: pipelineConfig.WORKFLOW_EXECUTOR_TYPE_AWF, - Status: pipelineConfig.WorkflowInProgress, - TriggeredBy: overrideRequest.UserId, - StartedOn: triggeredAt, - CdWorkflowId: cdWorkflowId, - AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, - } - _, err = impl.cdWorkflowRepository.SaveWorkFlowRunner(runner) - if err != nil { - impl.logger.Errorw("err on updating cd workflow runner for status update", "err", err) - return false, err - } - } else { - cdWf.Status = pipelineConfig.WorkflowInProgress - cdWf.FinishedOn = time.Now() 
- cdWf.UpdatedBy = overrideRequest.UserId - cdWf.UpdatedOn = time.Now() - err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(&cdWf) - if err != nil { - impl.logger.Errorw("error on update cd workflow runner", "cdWf", cdWf, "err", err) - return false, err - } - } - } - return true, nil -} - -func (impl *WorkflowDagExecutorImpl) GetDeploymentStrategyByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (*chartConfig.PipelineStrategy, error) { - - strategy := &chartConfig.PipelineStrategy{} - var err error - if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { - _, span := otel.Tracer("orchestrator").Start(ctx, "strategyHistoryRepository.GetHistoryByPipelineIdAndWfrId") - strategyHistory, err := impl.strategyHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - span.End() - if err != nil { - impl.logger.Errorw("error in getting deployed strategy history by pipleinId and wfrId", "err", err, "pipelineId", overrideRequest.PipelineId, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - return nil, err - } - strategy.Strategy = strategyHistory.Strategy - strategy.Config = strategyHistory.Config - strategy.PipelineId = overrideRequest.PipelineId - } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { - if overrideRequest.ForceTrigger { - _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.GetDefaultStrategyByPipelineId") - strategy, err = impl.pipelineConfigRepository.GetDefaultStrategyByPipelineId(overrideRequest.PipelineId) - span.End() - } else { - var deploymentTemplate chartRepoRepository.DeploymentStrategy - if overrideRequest.DeploymentTemplate == "ROLLING" { - deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_ROLLING - } else if overrideRequest.DeploymentTemplate == "BLUE-GREEN" { - deploymentTemplate = 
chartRepoRepository.DEPLOYMENT_STRATEGY_BLUE_GREEN - } else if overrideRequest.DeploymentTemplate == "CANARY" { - deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_CANARY - } else if overrideRequest.DeploymentTemplate == "RECREATE" { - deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_RECREATE - } - - if len(deploymentTemplate) > 0 { - _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.FindByStrategyAndPipelineId") - strategy, err = impl.pipelineConfigRepository.FindByStrategyAndPipelineId(deploymentTemplate, overrideRequest.PipelineId) - span.End() - } else { - _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.GetDefaultStrategyByPipelineId") - strategy, err = impl.pipelineConfigRepository.GetDefaultStrategyByPipelineId(overrideRequest.PipelineId) - span.End() - } - } - if err != nil && errors2.IsNotFound(err) == false { - impl.logger.Errorf("invalid state", "err", err, "req", strategy) - return nil, err - } - } - return strategy, nil -} - -func (impl *WorkflowDagExecutorImpl) GetEnvOverrideByTriggerType(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*chartConfig.EnvConfigOverride, error) { - - envOverride := &chartConfig.EnvConfigOverride{} - - var err error - if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { - _, span := otel.Tracer("orchestrator").Start(ctx, "deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId") - deploymentTemplateHistory, err := impl.deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - //VARIABLE_SNAPSHOT_GET and resolve - - span.End() - if err != nil { - impl.logger.Errorw("error in getting deployed deployment template history by pipelineId and wfrId", "err", err, "pipelineId", &overrideRequest, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - return nil, err 
- } - templateName := deploymentTemplateHistory.TemplateName - templateVersion := deploymentTemplateHistory.TemplateVersion - if templateName == "Rollout Deployment" { - templateName = "" - } - //getting chart_ref by id - _, span = otel.Tracer("orchestrator").Start(ctx, "chartRefRepository.FindByVersionAndName") - chartRef, err := impl.chartRefRepository.FindByVersionAndName(templateName, templateVersion) - span.End() - if err != nil { - impl.logger.Errorw("error in getting chartRef by version and name", "err", err, "version", templateVersion, "name", templateName) - return nil, err - } - //assuming that if a chartVersion is deployed then it's envConfigOverride will be available - _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.GetByAppIdEnvIdAndChartRefId") - envOverride, err = impl.environmentConfigRepository.GetByAppIdEnvIdAndChartRefId(overrideRequest.AppId, overrideRequest.EnvId, chartRef.Id) - span.End() - if err != nil { - impl.logger.Errorw("error in getting envConfigOverride for pipeline for specific chartVersion", "err", err, "appId", overrideRequest.AppId, "envId", overrideRequest.EnvId, "chartRefId", chartRef.Id) - return nil, err - } - - _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") - env, err := impl.envRepository.FindById(envOverride.TargetEnvironment) - span.End() - if err != nil { - impl.logger.Errorw("unable to find env", "err", err) - return nil, err - } - envOverride.Environment = env - - //updating historical data in envConfigOverride and appMetrics flag - envOverride.IsOverride = true - envOverride.EnvOverrideValues = deploymentTemplateHistory.Template - - resolvedTemplate, variableMap, err := impl.getResolvedTemplateWithSnapshot(deploymentTemplateHistory.Id, envOverride.EnvOverrideValues) - envOverride.ResolvedEnvOverrideValues = resolvedTemplate - envOverride.VariableSnapshot = variableMap - if err != nil { - return envOverride, err - } - } else if 
overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { - _, span := otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.ActiveEnvConfigOverride") - envOverride, err = impl.environmentConfigRepository.ActiveEnvConfigOverride(overrideRequest.AppId, overrideRequest.EnvId) - - var chart *chartRepoRepository.Chart - span.End() - if err != nil { - impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) - return nil, err - } - if envOverride.Id == 0 { - _, span = otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindLatestChartForAppByAppId") - chart, err = impl.chartRepository.FindLatestChartForAppByAppId(overrideRequest.AppId) - span.End() - if err != nil { - impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) - return nil, err - } - _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.FindChartByAppIdAndEnvIdAndChartRefId") - envOverride, err = impl.environmentConfigRepository.FindChartByAppIdAndEnvIdAndChartRefId(overrideRequest.AppId, overrideRequest.EnvId, chart.ChartRefId) - span.End() - if err != nil && !errors2.IsNotFound(err) { - impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) - return nil, err - } - - //creating new env override config - if errors2.IsNotFound(err) || envOverride == nil { - _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") - environment, err := impl.envRepository.FindById(overrideRequest.EnvId) - span.End() - if err != nil && !util.IsErrNoRows(err) { - return nil, err - } - envOverride = &chartConfig.EnvConfigOverride{ - Active: true, - ManualReviewed: true, - Status: models.CHARTSTATUS_SUCCESS, - TargetEnvironment: overrideRequest.EnvId, - ChartId: chart.Id, - AuditLog: sql.AuditLog{UpdatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId}, - Namespace: environment.Namespace, - IsOverride: false, - EnvOverrideValues: "{}", - 
Latest: false, - IsBasicViewLocked: chart.IsBasicViewLocked, - CurrentViewEditor: chart.CurrentViewEditor, - } - _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.Save") - err = impl.environmentConfigRepository.Save(envOverride) - span.End() - if err != nil { - impl.logger.Errorw("error in creating envconfig", "data", envOverride, "error", err) - return nil, err - } - } - envOverride.Chart = chart - } else if envOverride.Id > 0 && !envOverride.IsOverride { - _, span = otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindLatestChartForAppByAppId") - chart, err = impl.chartRepository.FindLatestChartForAppByAppId(overrideRequest.AppId) - span.End() - if err != nil { - impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) - return nil, err - } - envOverride.Chart = chart - } - - _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") - env, err := impl.envRepository.FindById(envOverride.TargetEnvironment) - span.End() - if err != nil { - impl.logger.Errorw("unable to find env", "err", err) - return nil, err - } - envOverride.Environment = env - - //VARIABLE different cases for variable resolution - scope := resourceQualifiers.Scope{ - AppId: overrideRequest.AppId, - EnvId: overrideRequest.EnvId, - ClusterId: overrideRequest.ClusterId, - SystemMetadata: &resourceQualifiers.SystemMetadata{ - EnvironmentName: env.Name, - ClusterName: env.Cluster.ClusterName, - Namespace: env.Namespace, - AppName: overrideRequest.AppName, - Image: overrideRequest.Image, - ImageTag: util3.GetImageTagFromImage(overrideRequest.Image), - }, - } - - if envOverride.IsOverride { - - resolvedTemplate, variableMap, err := impl.extractVariablesAndResolveTemplate(scope, envOverride.EnvOverrideValues, repository5.Entity{ - EntityType: repository5.EntityTypeDeploymentTemplateEnvLevel, - EntityId: envOverride.Id, - }) - envOverride.ResolvedEnvOverrideValues = resolvedTemplate - envOverride.VariableSnapshot = variableMap - if err != 
nil { - return envOverride, err - } - - } else { - resolvedTemplate, variableMap, err := impl.extractVariablesAndResolveTemplate(scope, chart.GlobalOverride, repository5.Entity{ - EntityType: repository5.EntityTypeDeploymentTemplateAppLevel, - EntityId: chart.Id, - }) - envOverride.Chart.ResolvedGlobalOverride = resolvedTemplate - envOverride.VariableSnapshot = variableMap - if err != nil { - return envOverride, err - } - - } - } - - return envOverride, nil -} - -func (impl *WorkflowDagExecutorImpl) GetAppMetricsByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (bool, error) { - - var appMetrics bool - if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { - _, span := otel.Tracer("orchestrator").Start(ctx, "deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId") - deploymentTemplateHistory, err := impl.deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - span.End() - if err != nil { - impl.logger.Errorw("error in getting deployed deployment template history by pipelineId and wfrId", "err", err, "pipelineId", &overrideRequest, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - return appMetrics, err - } - appMetrics = deploymentTemplateHistory.IsAppMetricsEnabled - - } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { - _, span := otel.Tracer("orchestrator").Start(ctx, "appLevelMetricsRepository.FindByAppId") - appLevelMetrics, err := impl.appLevelMetricsRepository.FindByAppId(overrideRequest.AppId) - span.End() - if err != nil && !util.IsErrNoRows(err) { - impl.logger.Errorw("err", err) - return appMetrics, &util.ApiError{InternalMessage: "unable to fetch app level metrics flag"} - } - appMetrics = appLevelMetrics.AppMetrics - - _, span = otel.Tracer("orchestrator").Start(ctx, "envLevelMetricsRepository.FindByAppIdAndEnvId") - 
envLevelMetrics, err := impl.envLevelMetricsRepository.FindByAppIdAndEnvId(overrideRequest.AppId, overrideRequest.EnvId) - span.End() - if err != nil && !util.IsErrNoRows(err) { - impl.logger.Errorw("err", err) - return appMetrics, &util.ApiError{InternalMessage: "unable to fetch env level metrics flag"} - } - if envLevelMetrics.Id != 0 && envLevelMetrics.AppMetrics != nil { - appMetrics = *envLevelMetrics.AppMetrics - } - } - return appMetrics, nil -} - -func (impl *WorkflowDagExecutorImpl) getDbMigrationOverride(overrideRequest *bean.ValuesOverrideRequest, artifact *repository.CiArtifact, isRollback bool) (overrideJson []byte, err error) { - if isRollback { - return nil, fmt.Errorf("rollback not supported ye") - } - notConfigured := false - config, err := impl.dbMigrationConfigRepository.FindByPipelineId(overrideRequest.PipelineId) - if err != nil && !util.IsErrNoRows(err) { - impl.logger.Errorw("error in fetching pipeline override config", "req", overrideRequest, "err", err) - return nil, err - } else if util.IsErrNoRows(err) { - notConfigured = true - } - envVal := &EnvironmentOverride{} - if notConfigured { - impl.logger.Warnw("no active db migration found", "pipeline", overrideRequest.PipelineId) - envVal.Enabled = false - } else { - materialInfos, err := artifact.ParseMaterialInfo() - if err != nil { - return nil, err - } - - hash, ok := materialInfos[config.GitMaterial.Url] - if !ok { - impl.logger.Errorf("wrong url map ", "map", materialInfos, "url", config.GitMaterial.Url) - return nil, fmt.Errorf("configured url not found in material %s", config.GitMaterial.Url) - } - - envVal.Enabled = true - if config.GitMaterial.GitProvider.AuthMode != repository.AUTH_MODE_USERNAME_PASSWORD && - config.GitMaterial.GitProvider.AuthMode != repository.AUTH_MODE_ACCESS_TOKEN && - config.GitMaterial.GitProvider.AuthMode != repository.AUTH_MODE_ANONYMOUS { - return nil, fmt.Errorf("auth mode %s not supported for migration", config.GitMaterial.GitProvider.AuthMode) - } - 
envVal.appendEnvironmentVariable("GIT_REPO_URL", config.GitMaterial.Url) - envVal.appendEnvironmentVariable("GIT_USER", config.GitMaterial.GitProvider.UserName) - var password string - if config.GitMaterial.GitProvider.AuthMode == repository.AUTH_MODE_USERNAME_PASSWORD { - password = config.GitMaterial.GitProvider.Password - } else { - password = config.GitMaterial.GitProvider.AccessToken - } - envVal.appendEnvironmentVariable("GIT_AUTH_TOKEN", password) - // parse git-tag not required - //envVal.appendEnvironmentVariable("GIT_TAG", "") - envVal.appendEnvironmentVariable("GIT_HASH", hash) - envVal.appendEnvironmentVariable("SCRIPT_LOCATION", config.ScriptSource) - envVal.appendEnvironmentVariable("DB_TYPE", string(config.DbConfig.Type)) - envVal.appendEnvironmentVariable("DB_USER_NAME", config.DbConfig.UserName) - envVal.appendEnvironmentVariable("DB_PASSWORD", config.DbConfig.Password) - envVal.appendEnvironmentVariable("DB_HOST", config.DbConfig.Host) - envVal.appendEnvironmentVariable("DB_PORT", config.DbConfig.Port) - envVal.appendEnvironmentVariable("DB_NAME", config.DbConfig.DbName) - //Will be used for rollback don't delete it - //envVal.appendEnvironmentVariable("MIGRATE_TO_VERSION", strconv.Itoa(overrideRequest.TargetDbVersion)) - } - dbMigrationConfig := map[string]interface{}{"dbMigrationConfig": envVal} - confByte, err := json.Marshal(dbMigrationConfig) - if err != nil { - return nil, err - } - return confByte, nil -} - -func (impl *WorkflowDagExecutorImpl) getConfigMapAndSecretJsonV2(appId int, envId int, pipelineId int, chartVersion string, deploymentWithConfig bean.DeploymentConfigurationType, wfrIdForDeploymentWithSpecificTrigger int) ([]byte, error) { - - var configMapJson string - var secretDataJson string - var configMapJsonApp string - var secretDataJsonApp string - var configMapJsonEnv string - var secretDataJsonEnv string - var err error - //var configMapJsonPipeline string - //var secretDataJsonPipeline string - - merged := []byte("{}") - if 
deploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { - configMapA, err := impl.configMapRepository.GetByAppIdAppLevel(appId) - if err != nil && pg.ErrNoRows != err { - return []byte("{}"), err - } - if configMapA != nil && configMapA.Id > 0 { - configMapJsonApp = configMapA.ConfigMapData - secretDataJsonApp = configMapA.SecretData - } - configMapE, err := impl.configMapRepository.GetByAppIdAndEnvIdEnvLevel(appId, envId) - if err != nil && pg.ErrNoRows != err { - return []byte("{}"), err - } - if configMapE != nil && configMapE.Id > 0 { - configMapJsonEnv = configMapE.ConfigMapData - secretDataJsonEnv = configMapE.SecretData - } - } else if deploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { - //fetching history and setting envLevelConfig and not appLevelConfig because history already contains merged appLevel and envLevel configs - configMapHistory, err := impl.configMapHistoryRepository.GetHistoryByPipelineIdAndWfrId(pipelineId, wfrIdForDeploymentWithSpecificTrigger, repository3.CONFIGMAP_TYPE) - if err != nil { - impl.logger.Errorw("error in getting config map history config by pipelineId and wfrId ", "err", err, "pipelineId", pipelineId, "wfrid", wfrIdForDeploymentWithSpecificTrigger) - return []byte("{}"), err - } - configMapJsonEnv = configMapHistory.Data - secretHistory, err := impl.configMapHistoryRepository.GetHistoryByPipelineIdAndWfrId(pipelineId, wfrIdForDeploymentWithSpecificTrigger, repository3.SECRET_TYPE) - if err != nil { - impl.logger.Errorw("error in getting config map history config by pipelineId and wfrId ", "err", err, "pipelineId", pipelineId, "wfrid", wfrIdForDeploymentWithSpecificTrigger) - return []byte("{}"), err - } - secretDataJsonEnv = secretHistory.Data - } - configMapJson, err = impl.mergeUtil.ConfigMapMerge(configMapJsonApp, configMapJsonEnv) - if err != nil { - return []byte("{}"), err - } - chartMajorVersion, chartMinorVersion, err := util4.ExtractChartVersion(chartVersion) - if err != nil { - 
impl.logger.Errorw("chart version parsing", "err", err) - return []byte("{}"), err - } - secretDataJson, err = impl.mergeUtil.ConfigSecretMerge(secretDataJsonApp, secretDataJsonEnv, chartMajorVersion, chartMinorVersion, false) - if err != nil { - return []byte("{}"), err - } - configResponseR := bean.ConfigMapRootJson{} - configResponse := bean.ConfigMapJson{} - if configMapJson != "" { - err = json.Unmarshal([]byte(configMapJson), &configResponse) - if err != nil { - return []byte("{}"), err - } - } - configResponseR.ConfigMapJson = configResponse - secretResponseR := bean.ConfigSecretRootJson{} - secretResponse := bean.ConfigSecretJson{} - if configMapJson != "" { - err = json.Unmarshal([]byte(secretDataJson), &secretResponse) - if err != nil { - return []byte("{}"), err - } - } - secretResponseR.ConfigSecretJson = secretResponse - - configMapByte, err := json.Marshal(configResponseR) - if err != nil { - return []byte("{}"), err - } - secretDataByte, err := json.Marshal(secretResponseR) - if err != nil { - return []byte("{}"), err - } - - merged, err = impl.mergeUtil.JsonPatch(configMapByte, secretDataByte) - if err != nil { - return []byte("{}"), err - } - return merged, nil -} - -func (impl *WorkflowDagExecutorImpl) savePipelineOverride(overrideRequest *bean.ValuesOverrideRequest, envOverrideId int, triggeredAt time.Time) (override *chartConfig.PipelineOverride, err error) { - currentReleaseNo, err := impl.pipelineOverrideRepository.GetCurrentPipelineReleaseCounter(overrideRequest.PipelineId) - if err != nil { - return nil, err - } - po := &chartConfig.PipelineOverride{ - EnvConfigOverrideId: envOverrideId, - Status: models.CHARTSTATUS_NEW, - PipelineId: overrideRequest.PipelineId, - CiArtifactId: overrideRequest.CiArtifactId, - PipelineReleaseCounter: currentReleaseNo + 1, - CdWorkflowId: overrideRequest.CdWorkflowId, - AuditLog: sql.AuditLog{CreatedBy: overrideRequest.UserId, CreatedOn: triggeredAt, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, 
- DeploymentType: overrideRequest.DeploymentType, - } - - err = impl.pipelineOverrideRepository.Save(po) - if err != nil { - return nil, err - } - err = impl.checkAndFixDuplicateReleaseNo(po) - if err != nil { - impl.logger.Errorw("error in checking release no duplicacy", "pipeline", po, "err", err) - return nil, err - } - return po, nil -} - -func (impl *WorkflowDagExecutorImpl) getReleaseOverride(envOverride *chartConfig.EnvConfigOverride, overrideRequest *bean.ValuesOverrideRequest, artifact *repository.CiArtifact, pipelineOverride *chartConfig.PipelineOverride, strategy *chartConfig.PipelineStrategy, appMetrics *bool) (releaseOverride string, err error) { - - artifactImage := artifact.Image - imageTag := strings.Split(artifactImage, ":") - - imageTagLen := len(imageTag) - - imageName := "" - - for i := 0; i < imageTagLen-1; i++ { - if i != imageTagLen-2 { - imageName = imageName + imageTag[i] + ":" - } else { - imageName = imageName + imageTag[i] - } - } - - appId := strconv.Itoa(overrideRequest.AppId) - envId := strconv.Itoa(overrideRequest.EnvId) - - deploymentStrategy := "" - if strategy != nil { - deploymentStrategy = string(strategy.Strategy) - } - releaseAttribute := app.ReleaseAttributes{ - Name: imageName, - Tag: imageTag[imageTagLen-1], - PipelineName: overrideRequest.PipelineName, - ReleaseVersion: strconv.Itoa(pipelineOverride.PipelineReleaseCounter), - DeploymentType: deploymentStrategy, - App: appId, - Env: envId, - AppMetrics: appMetrics, - } - override, err := util4.Tprintf(envOverride.Chart.ImageDescriptorTemplate, releaseAttribute) - if err != nil { - return "", &util.ApiError{InternalMessage: "unable to render ImageDescriptorTemplate"} - } - if overrideRequest.AdditionalOverride != nil { - userOverride, err := overrideRequest.AdditionalOverride.MarshalJSON() - if err != nil { - return "", err - } - data, err := impl.mergeUtil.JsonPatch(userOverride, []byte(override)) - if err != nil { - return "", err - } - override = string(data) - } - return 
override, nil -} - -func (impl *WorkflowDagExecutorImpl) mergeAndSave(envOverride *chartConfig.EnvConfigOverride, - overrideRequest *bean.ValuesOverrideRequest, - dbMigrationOverride []byte, - artifact *repository.CiArtifact, - pipeline *pipelineConfig.Pipeline, configMapJson, appLabelJsonByte []byte, strategy *chartConfig.PipelineStrategy, ctx context.Context, - triggeredAt time.Time, deployedBy int32, appMetrics *bool) (releaseId int, overrideId int, mergedValues string, err error) { - - //register release , obtain release id TODO: populate releaseId to template - override, err := impl.savePipelineOverride(overrideRequest, envOverride.Id, triggeredAt) - if err != nil { - return 0, 0, "", err - } - //TODO: check status and apply lock - overrideJson, err := impl.getReleaseOverride(envOverride, overrideRequest, artifact, override, strategy, appMetrics) - if err != nil { - return 0, 0, "", err - } - - //merge three values on the fly - //ordering is important here - //global < environment < db< release - var merged []byte - if !envOverride.IsOverride { - merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.Chart.GlobalOverride)) - if err != nil { - return 0, 0, "", err - } - } else { - merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.EnvOverrideValues)) - if err != nil { - return 0, 0, "", err - } - } - - //pipeline override here comes from pipeline strategy table - if strategy != nil && len(strategy.Config) > 0 { - merged, err = impl.mergeUtil.JsonPatch(merged, []byte(strategy.Config)) - if err != nil { - return 0, 0, "", err - } - } - merged, err = impl.mergeUtil.JsonPatch(merged, dbMigrationOverride) - if err != nil { - return 0, 0, "", err - } - merged, err = impl.mergeUtil.JsonPatch(merged, []byte(overrideJson)) - if err != nil { - return 0, 0, "", err - } - - if configMapJson != nil { - merged, err = impl.mergeUtil.JsonPatch(merged, configMapJson) - if err != nil { - return 0, 0, "", err - } - } - - if appLabelJsonByte 
!= nil { - merged, err = impl.mergeUtil.JsonPatch(merged, appLabelJsonByte) - if err != nil { - return 0, 0, "", err - } - } - - appName := fmt.Sprintf("%s-%s", pipeline.App.AppName, envOverride.Environment.Name) - merged = impl.autoscalingCheckBeforeTrigger(ctx, appName, envOverride.Namespace, merged, overrideRequest) - - _, span := otel.Tracer("orchestrator").Start(ctx, "dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment") - // handle image pull secret if access given - merged, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(envOverride.Environment, pipeline.CiPipelineId, merged) - span.End() - if err != nil { - return 0, 0, "", err - } - - commitHash := "" - commitTime := time.Time{} - if util.IsAcdApp(pipeline.DeploymentAppType) { - chartRepoName := impl.chartTemplateService.GetGitOpsRepoNameFromUrl(envOverride.Chart.GitRepoUrl) - _, span = otel.Tracer("orchestrator").Start(ctx, "chartTemplateService.GetUserEmailIdAndNameForGitOpsCommit") - //getting username & emailId for commit author data - userEmailId, userName := impl.chartTemplateService.GetUserEmailIdAndNameForGitOpsCommit(overrideRequest.UserId) - span.End() - chartGitAttr := &util.ChartConfig{ - FileName: fmt.Sprintf("_%d-values.yaml", envOverride.TargetEnvironment), - FileContent: string(merged), - ChartName: envOverride.Chart.ChartName, - ChartLocation: envOverride.Chart.ChartLocation, - ChartRepoName: chartRepoName, - ReleaseMessage: fmt.Sprintf("release-%d-env-%d ", override.Id, envOverride.TargetEnvironment), - UserName: userName, - UserEmailId: userEmailId, - } - gitOpsConfigBitbucket, err := impl.gitOpsConfigRepository.GetGitOpsConfigByProvider(util.BITBUCKET_PROVIDER) - if err != nil { - if err == pg.ErrNoRows { - gitOpsConfigBitbucket.BitBucketWorkspaceId = "" - } else { - return 0, 0, "", err - } - } - gitOpsConfig := &bean.GitOpsConfigDto{BitBucketWorkspaceId: gitOpsConfigBitbucket.BitBucketWorkspaceId} - _, span = 
otel.Tracer("orchestrator").Start(ctx, "gitFactory.Client.CommitValues") - commitHash, commitTime, err = impl.gitFactory.Client.CommitValues(chartGitAttr, gitOpsConfig) - span.End() - if err != nil { - impl.logger.Errorw("error in git commit", "err", err) - return 0, 0, "", err - } - } - if commitTime.IsZero() { - commitTime = time.Now() - } - pipelineOverride := &chartConfig.PipelineOverride{ - Id: override.Id, - GitHash: commitHash, - CommitTime: commitTime, - EnvConfigOverrideId: envOverride.Id, - PipelineOverrideValues: overrideJson, - PipelineId: overrideRequest.PipelineId, - CiArtifactId: overrideRequest.CiArtifactId, - PipelineMergedValues: string(merged), - AuditLog: sql.AuditLog{UpdatedOn: triggeredAt, UpdatedBy: deployedBy}, - } - _, span = otel.Tracer("orchestrator").Start(ctx, "pipelineOverrideRepository.Update") - err = impl.pipelineOverrideRepository.Update(pipelineOverride) - span.End() - if err != nil { - return 0, 0, "", err - } - mergedValues = string(merged) - return override.PipelineReleaseCounter, override.Id, mergedValues, nil -} - -func (impl *WorkflowDagExecutorImpl) mergeOverrideValues(envOverride *chartConfig.EnvConfigOverride, - dbMigrationOverride []byte, - releaseOverrideJson string, - configMapJson []byte, - appLabelJsonByte []byte, - strategy *chartConfig.PipelineStrategy, -) (mergedValues []byte, err error) { - - //merge three values on the fly - //ordering is important here - //global < environment < db< release - var merged []byte - if !envOverride.IsOverride { - merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.Chart.ResolvedGlobalOverride)) - if err != nil { - return nil, err - } - } else { - merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.ResolvedEnvOverrideValues)) - if err != nil { - return nil, err - } - } - if strategy != nil && len(strategy.Config) > 0 { - merged, err = impl.mergeUtil.JsonPatch(merged, []byte(strategy.Config)) - if err != nil { - return nil, err - } - } - 
merged, err = impl.mergeUtil.JsonPatch(merged, dbMigrationOverride) - if err != nil { - return nil, err - } - merged, err = impl.mergeUtil.JsonPatch(merged, []byte(releaseOverrideJson)) - if err != nil { - return nil, err - } - if configMapJson != nil { - merged, err = impl.mergeUtil.JsonPatch(merged, configMapJson) - if err != nil { - return nil, err - } - } - if appLabelJsonByte != nil { - merged, err = impl.mergeUtil.JsonPatch(merged, appLabelJsonByte) - if err != nil { - return nil, err - } - } - return merged, nil -} - -func (impl *WorkflowDagExecutorImpl) autoscalingCheckBeforeTrigger(ctx context.Context, appName string, namespace string, merged []byte, overrideRequest *bean.ValuesOverrideRequest) []byte { - //pipeline := overrideRequest.Pipeline - var appId = overrideRequest.AppId - pipelineId := overrideRequest.PipelineId - var appDeploymentType = overrideRequest.DeploymentAppType - var clusterId = overrideRequest.ClusterId - deploymentType := overrideRequest.DeploymentType - templateMap := make(map[string]interface{}) - err := json.Unmarshal(merged, &templateMap) - if err != nil { - return merged - } - - hpaResourceRequest := impl.getAutoScalingReplicaCount(templateMap, appName) - impl.logger.Debugw("autoscalingCheckBeforeTrigger", "hpaResourceRequest", hpaResourceRequest) - if hpaResourceRequest.IsEnable { - resourceManifest := make(map[string]interface{}) - if util.IsAcdApp(appDeploymentType) { - query := &application.ApplicationResourceRequest{ - Name: &appName, - Version: &hpaResourceRequest.Version, - Group: &hpaResourceRequest.Group, - Kind: &hpaResourceRequest.Kind, - ResourceName: &hpaResourceRequest.ResourceName, - Namespace: &namespace, - } - recv, err := impl.acdClient.GetResource(ctx, query) - impl.logger.Debugw("resource manifest get replica count", "response", recv) - if err != nil { - impl.logger.Errorw("ACD Get Resource API Failed", "err", err) - middleware.AcdGetResourceCounter.WithLabelValues(strconv.Itoa(appId), namespace, appName).Inc() 
- return merged - } - if recv != nil && len(*recv.Manifest) > 0 { - err := json.Unmarshal([]byte(*recv.Manifest), &resourceManifest) - if err != nil { - impl.logger.Errorw("unmarshal failed for hpa check", "err", err) - return merged - } - } - } else { - version := "v2beta2" - k8sResource, err := impl.k8sCommonService.GetResource(ctx, &k8s.ResourceRequestBean{ClusterId: clusterId, - K8sRequest: &util5.K8sRequestBean{ResourceIdentifier: util5.ResourceIdentifier{Name: hpaResourceRequest.ResourceName, - Namespace: namespace, GroupVersionKind: schema.GroupVersionKind{Group: hpaResourceRequest.Group, Kind: hpaResourceRequest.Kind, Version: version}}}}) - if err != nil { - impl.logger.Errorw("error occurred while fetching resource for app", "resourceName", hpaResourceRequest.ResourceName, "err", err) - return merged - } - resourceManifest = k8sResource.Manifest.Object - } - if len(resourceManifest) > 0 { - statusMap := resourceManifest["status"].(map[string]interface{}) - currentReplicaVal := statusMap["currentReplicas"] - currentReplicaCount, err := util4.ParseFloatNumber(currentReplicaVal) - if err != nil { - impl.logger.Errorw("error occurred while parsing replica count", "currentReplicas", currentReplicaVal, "err", err) - return merged - } - - reqReplicaCount := impl.fetchRequiredReplicaCount(currentReplicaCount, hpaResourceRequest.ReqMaxReplicas, hpaResourceRequest.ReqMinReplicas) - templateMap["replicaCount"] = reqReplicaCount - merged, err = json.Marshal(&templateMap) - if err != nil { - impl.logger.Errorw("marshaling failed for hpa check", "err", err) - return merged - } - } - } else { - impl.logger.Errorw("autoscaling is not enabled", "pipelineId", pipelineId) - } - - //check for custom chart support - if autoscalingEnabledPath, ok := templateMap[bean2.CustomAutoScalingEnabledPathKey]; ok { - if deploymentType == models.DEPLOYMENTTYPE_STOP { - merged, err = impl.setScalingValues(templateMap, bean2.CustomAutoScalingEnabledPathKey, merged, false) - if err != nil { 
- return merged - } - merged, err = impl.setScalingValues(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged, 0) - if err != nil { - return merged - } - } else { - autoscalingEnabled := false - autoscalingEnabledValue := gjson.Get(string(merged), autoscalingEnabledPath.(string)).Value() - if val, ok := autoscalingEnabledValue.(bool); ok { - autoscalingEnabled = val - } - if autoscalingEnabled { - // extract replica count, min, max and check for required value - replicaCount, err := impl.getReplicaCountFromCustomChart(templateMap, merged) - if err != nil { - return merged - } - merged, err = impl.setScalingValues(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged, replicaCount) - if err != nil { - return merged - } - } - } - } - - return merged -} - -func (impl *WorkflowDagExecutorImpl) updateArgoPipeline(appId int, pipelineName string, envOverride *chartConfig.EnvConfigOverride, ctx context.Context) (bool, error) { - //repo has been registered while helm create - if ctx == nil { - impl.logger.Errorw("err in syncing ACD, ctx is NULL", "pipelineName", pipelineName) - return false, nil - } - app, err := impl.appRepository.FindById(appId) - if err != nil { - impl.logger.Errorw("no app found ", "err", err) - return false, err - } - envModel, err := impl.envRepository.FindById(envOverride.TargetEnvironment) - if err != nil { - return false, err - } - argoAppName := fmt.Sprintf("%s-%s", app.AppName, envModel.Name) - impl.logger.Infow("received payload, updateArgoPipeline", "appId", appId, "pipelineName", pipelineName, "envId", envOverride.TargetEnvironment, "argoAppName", argoAppName, "context", ctx) - application3, err := impl.acdClient.Get(ctx, &application.ApplicationQuery{Name: &argoAppName}) - if err != nil { - impl.logger.Errorw("no argo app exists", "app", argoAppName, "pipeline", pipelineName) - return false, err - } - //if status, ok:=status.FromError(err);ok{ - appStatus, _ := status2.FromError(err) - - if appStatus.Code() == codes.OK { 
- impl.logger.Debugw("argo app exists", "app", argoAppName, "pipeline", pipelineName) - if application3.Spec.Source.Path != envOverride.Chart.ChartLocation || application3.Spec.Source.TargetRevision != "master" { - patchReq := v1alpha1.Application{Spec: v1alpha1.ApplicationSpec{Source: v1alpha1.ApplicationSource{Path: envOverride.Chart.ChartLocation, RepoURL: envOverride.Chart.GitRepoUrl, TargetRevision: "master"}}} - reqbyte, err := json.Marshal(patchReq) - if err != nil { - impl.logger.Errorw("error in creating patch", "err", err) - } - reqString := string(reqbyte) - patchType := "merge" - _, err = impl.acdClient.Patch(ctx, &application.ApplicationPatchRequest{Patch: &reqString, Name: &argoAppName, PatchType: &patchType}) - if err != nil { - impl.logger.Errorw("error in creating argo pipeline ", "name", pipelineName, "patch", string(reqbyte), "err", err) - return false, err - } - impl.logger.Debugw("pipeline update req ", "res", patchReq) - } else { - impl.logger.Debug("pipeline no need to update ") - } - // Doing normal refresh to avoid the sync delay in argo-cd. 
- err2 := impl.argoClientWrapperService.GetArgoAppWithNormalRefresh(ctx, argoAppName) - if err2 != nil { - impl.logger.Errorw("error in getting argo application with normal refresh", "argoAppName", argoAppName, "pipelineName", pipelineName) - } - return true, nil - } else if appStatus.Code() == codes.NotFound { - impl.logger.Errorw("argo app not found", "app", argoAppName, "pipeline", pipelineName) - return false, nil - } else { - impl.logger.Errorw("err in checking application on gocd", "err", err, "pipeline", pipelineName) - return false, err - } -} - -func (impl *WorkflowDagExecutorImpl) getValuesFileForEnv(environmentId int) string { - return fmt.Sprintf("_%d-values.yaml", environmentId) //-{envId}-values.yaml -} - -func (impl *WorkflowDagExecutorImpl) updatePipeline(pipeline *pipelineConfig.Pipeline, userId int32) (bool, error) { - err := impl.pipelineRepository.SetDeploymentAppCreatedInPipeline(true, pipeline.Id, userId) - if err != nil { - impl.logger.Errorw("error on updating cd pipeline for setting deployment app created", "err", err) - return false, err - } - return true, nil -} - -// helmInstallReleaseWithCustomChart performs helm install with custom chart -func (impl *WorkflowDagExecutorImpl) helmInstallReleaseWithCustomChart(ctx context.Context, releaseIdentifier *client2.ReleaseIdentifier, referenceChartByte []byte, valuesYaml string) (*client2.HelmInstallCustomResponse, error) { - - helmInstallRequest := client2.HelmInstallCustomRequest{ - ValuesYaml: valuesYaml, - ChartContent: &client2.ChartContent{Content: referenceChartByte}, - ReleaseIdentifier: releaseIdentifier, - } - - // Request exec - return impl.helmAppClient.InstallReleaseWithCustomChart(ctx, &helmInstallRequest) -} - -func (impl *WorkflowDagExecutorImpl) getResolvedTemplateWithSnapshot(deploymentTemplateHistoryId int, template string) (string, map[string]string, error) { - - variableSnapshotMap := make(map[string]string) - reference := repository5.HistoryReference{ - HistoryReferenceId: 
deploymentTemplateHistoryId, - HistoryReferenceType: repository5.HistoryReferenceTypeDeploymentTemplate, - } - variableSnapshot, err := impl.variableSnapshotHistoryService.GetVariableHistoryForReferences([]repository5.HistoryReference{reference}) - if err != nil { - return template, variableSnapshotMap, err - } - - if _, ok := variableSnapshot[reference]; !ok { - return template, variableSnapshotMap, nil - } - - err = json.Unmarshal(variableSnapshot[reference].VariableSnapshot, &variableSnapshotMap) - if err != nil { - return template, variableSnapshotMap, err - } - - if len(variableSnapshotMap) == 0 { - return template, variableSnapshotMap, nil - } - scopedVariableData := parsers.GetScopedVarData(variableSnapshotMap, make(map[string]bool), true) - request := parsers.VariableParserRequest{Template: template, TemplateType: parsers.JsonVariableTemplate, Variables: scopedVariableData} - parserResponse := impl.variableTemplateParser.ParseTemplate(request) - err = parserResponse.Error - if err != nil { - return template, variableSnapshotMap, err - } - resolvedTemplate := parserResponse.ResolvedTemplate - return resolvedTemplate, variableSnapshotMap, nil -} - -func (impl *WorkflowDagExecutorImpl) extractVariablesAndResolveTemplate(scope resourceQualifiers.Scope, template string, entity repository5.Entity) (string, map[string]string, error) { - - variableMap := make(map[string]string) - entityToVariables, err := impl.variableEntityMappingService.GetAllMappingsForEntities([]repository5.Entity{entity}) - if err != nil { - return template, variableMap, err - } - - if vars, ok := entityToVariables[entity]; !ok || len(vars) == 0 { - return template, variableMap, nil - } - - // pre-populating variable map with variable so that the variables which don't have any resolved data - // is saved in snapshot - for _, variable := range entityToVariables[entity] { - variableMap[variable] = impl.scopedVariableService.GetFormattedVariableForName(variable) - } - - scopedVariables, err := 
impl.scopedVariableService.GetScopedVariables(scope, entityToVariables[entity], true) - if err != nil { - return template, variableMap, err - } - - for _, variable := range scopedVariables { - variableMap[variable.VariableName] = variable.VariableValue.StringValue() - } - - parserRequest := parsers.VariableParserRequest{Template: template, Variables: scopedVariables, TemplateType: parsers.JsonVariableTemplate} - parserResponse := impl.variableTemplateParser.ParseTemplate(parserRequest) - err = parserResponse.Error - if err != nil { - return template, variableMap, err - } - - resolvedTemplate := parserResponse.ResolvedTemplate - return resolvedTemplate, variableMap, nil -} - -type EnvironmentOverride struct { - Enabled bool `json:"enabled"` - EnvValues []*KeyValue `json:"envValues"` -} - -type KeyValue struct { - Key string `json:"key"` - Value string `json:"value"` -} - -func (conf *EnvironmentOverride) appendEnvironmentVariable(key, value string) { - item := &KeyValue{Key: key, Value: value} - conf.EnvValues = append(conf.EnvValues, item) -} - -func (impl *WorkflowDagExecutorImpl) checkAndFixDuplicateReleaseNo(override *chartConfig.PipelineOverride) error { - - uniqueVerified := false - retryCount := 0 - - for !uniqueVerified && retryCount < 5 { - retryCount = retryCount + 1 - overrides, err := impl.pipelineOverrideRepository.GetByPipelineIdAndReleaseNo(override.PipelineId, override.PipelineReleaseCounter) - if err != nil { - return err - } - if overrides[0].Id == override.Id { - uniqueVerified = true - } else { - //duplicate might be due to concurrency, lets fix it - currentReleaseNo, err := impl.pipelineOverrideRepository.GetCurrentPipelineReleaseCounter(override.PipelineId) - if err != nil { - return err - } - override.PipelineReleaseCounter = currentReleaseNo + 1 - err = impl.pipelineOverrideRepository.Save(override) - if err != nil { - return err - } - } - } - if !uniqueVerified { - return fmt.Errorf("duplicate verification retry count exide max overrideId: 
%d ,count: %d", override.Id, retryCount) - } - return nil -} - -func (impl *WorkflowDagExecutorImpl) getAutoScalingReplicaCount(templateMap map[string]interface{}, appName string) *util4.HpaResourceRequest { - hasOverride := false - if _, ok := templateMap[fullnameOverride]; ok { - appNameOverride := templateMap[fullnameOverride].(string) - if len(appNameOverride) > 0 { - appName = appNameOverride - hasOverride = true - } - } - if !hasOverride { - if _, ok := templateMap[nameOverride]; ok { - nameOverride := templateMap[nameOverride].(string) - if len(nameOverride) > 0 { - appName = fmt.Sprintf("%s-%s", appName, nameOverride) - } - } - } - hpaResourceRequest := &util4.HpaResourceRequest{} - hpaResourceRequest.Version = "" - hpaResourceRequest.Group = autoscaling.ServiceName - hpaResourceRequest.Kind = horizontalPodAutoscaler - impl.logger.Infow("getAutoScalingReplicaCount", "hpaResourceRequest", hpaResourceRequest) - if _, ok := templateMap[kedaAutoscaling]; ok { - as := templateMap[kedaAutoscaling] - asd := as.(map[string]interface{}) - if _, ok := asd[enabled]; ok { - impl.logger.Infow("getAutoScalingReplicaCount", "hpaResourceRequest", hpaResourceRequest) - enable := asd[enabled].(bool) - if enable { - hpaResourceRequest.IsEnable = enable - hpaResourceRequest.ReqReplicaCount = templateMap[replicaCount].(float64) - hpaResourceRequest.ReqMaxReplicas = asd["maxReplicaCount"].(float64) - hpaResourceRequest.ReqMinReplicas = asd["minReplicaCount"].(float64) - hpaResourceRequest.ResourceName = fmt.Sprintf("%s-%s-%s", "keda-hpa", appName, "keda") - impl.logger.Infow("getAutoScalingReplicaCount", "hpaResourceRequest", hpaResourceRequest) - return hpaResourceRequest - } - } - } - - if _, ok := templateMap[autoscaling.ServiceName]; ok { - as := templateMap[autoscaling.ServiceName] - asd := as.(map[string]interface{}) - if _, ok := asd[enabled]; ok { - enable := asd[enabled].(bool) - if enable { - hpaResourceRequest.IsEnable = asd[enabled].(bool) - 
hpaResourceRequest.ReqReplicaCount = templateMap[replicaCount].(float64) - hpaResourceRequest.ReqMaxReplicas = asd["MaxReplicas"].(float64) - hpaResourceRequest.ReqMinReplicas = asd["MinReplicas"].(float64) - hpaResourceRequest.ResourceName = fmt.Sprintf("%s-%s", appName, "hpa") - return hpaResourceRequest - } - } - } - return hpaResourceRequest - -} - -func (impl *WorkflowDagExecutorImpl) fetchRequiredReplicaCount(currentReplicaCount float64, reqMaxReplicas float64, reqMinReplicas float64) float64 { - var reqReplicaCount float64 - if currentReplicaCount <= reqMaxReplicas && currentReplicaCount >= reqMinReplicas { - reqReplicaCount = currentReplicaCount - } else if currentReplicaCount > reqMaxReplicas { - reqReplicaCount = reqMaxReplicas - } else if currentReplicaCount < reqMinReplicas { - reqReplicaCount = reqMinReplicas - } - return reqReplicaCount -} - -func (impl *WorkflowDagExecutorImpl) getReplicaCountFromCustomChart(templateMap map[string]interface{}, merged []byte) (float64, error) { - autoscalingMinVal, err := impl.extractParamValue(templateMap, bean2.CustomAutoscalingMinPathKey, merged) - if err != nil { - return 0, err - } - autoscalingMaxVal, err := impl.extractParamValue(templateMap, bean2.CustomAutoscalingMaxPathKey, merged) - if err != nil { - return 0, err - } - autoscalingReplicaCountVal, err := impl.extractParamValue(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged) - if err != nil { - return 0, err - } - return impl.fetchRequiredReplicaCount(autoscalingReplicaCountVal, autoscalingMaxVal, autoscalingMinVal), nil -} - -func (impl *WorkflowDagExecutorImpl) setScalingValues(templateMap map[string]interface{}, customScalingKey string, merged []byte, value interface{}) ([]byte, error) { - autoscalingJsonPath := templateMap[customScalingKey] - autoscalingJsonPathKey := autoscalingJsonPath.(string) - mergedRes, err := sjson.Set(string(merged), autoscalingJsonPathKey, value) - if err != nil { - impl.logger.Errorw("error occurred while 
setting autoscaling key", "JsonPathKey", autoscalingJsonPathKey, "err", err) - return []byte{}, err - } - return []byte(mergedRes), nil -} - -func (impl *WorkflowDagExecutorImpl) extractParamValue(inputMap map[string]interface{}, key string, merged []byte) (float64, error) { - if _, ok := inputMap[key]; !ok { - return 0, errors.New("empty-val-err") - } - floatNumber, err := util4.ParseFloatNumber(gjson.Get(string(merged), inputMap[key].(string)).Value()) - if err != nil { - impl.logger.Errorw("error occurred while parsing float number", "key", key, "err", err) - } - return floatNumber, err -} diff --git a/pkg/pipeline/WorkflowService.go b/pkg/pipeline/WorkflowService.go index 5ced308973..223c52ecb8 100644 --- a/pkg/pipeline/WorkflowService.go +++ b/pkg/pipeline/WorkflowService.go @@ -19,7 +19,6 @@ package pipeline import ( "context" - "encoding/json" "errors" "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" v1alpha12 "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1" @@ -138,21 +137,10 @@ func (impl *WorkflowServiceImpl) createWorkflowTemplate(workflowRequest *Workflo workflowTemplate.Volumes = ExtractVolumesFromCmCs(workflowConfigMaps, workflowSecrets) workflowRequest.AddNodeConstraintsFromConfig(&workflowTemplate, impl.ciCdConfig) - workflowMainContainer, err := workflowRequest.GetWorkflowMainContainer(impl.ciCdConfig, workflowJson, &workflowTemplate, workflowConfigMaps, workflowSecrets) - - if err != nil { - impl.Logger.Errorw("error occurred while getting workflow main container", "err", err) - return bean3.WorkflowTemplate{}, err - } - + workflowMainContainer := workflowRequest.GetWorkflowMainContainer(impl.ciCdConfig, workflowJson, workflowTemplate, workflowConfigMaps, workflowSecrets) workflowTemplate.Containers = []v12.Container{workflowMainContainer} impl.updateBlobStorageConfig(workflowRequest, &workflowTemplate) - if workflowRequest.Type == 
bean3.CI_WORKFLOW_PIPELINE_TYPE || workflowRequest.Type == bean3.JOB_WORKFLOW_PIPELINE_TYPE { - nodeSelector := impl.getAppLabelNodeSelector(workflowRequest) - if nodeSelector != nil { - workflowTemplate.NodeSelector = nodeSelector - } - } + if workflowRequest.Type == bean3.CD_WORKFLOW_PIPELINE_TYPE { workflowTemplate.WfControllerInstanceID = impl.ciCdConfig.WfControllerInstanceID workflowTemplate.TerminationGracePeriod = impl.ciCdConfig.TerminationGracePeriod @@ -254,21 +242,6 @@ func (impl *WorkflowServiceImpl) updateBlobStorageConfig(workflowRequest *Workfl workflowTemplate.CloudStorageKey = workflowRequest.BlobStorageLogsKey } -func (impl *WorkflowServiceImpl) getAppLabelNodeSelector(workflowRequest *WorkflowRequest) map[string]string { - // node selector - if val, ok := workflowRequest.AppLabels[CI_NODE_SELECTOR_APP_LABEL_KEY]; ok && !(workflowRequest.CheckForJob() && workflowRequest.IsExtRun) { - var nodeSelectors map[string]string - // Unmarshal or Decode the JSON to the interface. 
- err := json.Unmarshal([]byte(val), &nodeSelectors) - if err != nil { - impl.Logger.Errorw("err in unmarshalling nodeSelectors", "err", err, "val", val) - return nil - } - return nodeSelectors - } - return nil -} - func (impl *WorkflowServiceImpl) getWorkflowExecutor(executorType pipelineConfig.WorkflowExecutorType) WorkflowExecutor { if executorType == pipelineConfig.WORKFLOW_EXECUTOR_TYPE_AWF { return impl.argoWorkflowExecutor @@ -295,9 +268,6 @@ func (impl *WorkflowServiceImpl) TerminateWorkflow(executorType pipelineConfig.W var err error if executorType != "" { workflowExecutor := impl.getWorkflowExecutor(executorType) - if restConfig == nil { - restConfig = impl.config - } err = workflowExecutor.TerminateWorkflow(name, namespace, restConfig) } else { wfClient, err := impl.getWfClient(environment, namespace, isExt) diff --git a/pkg/pipeline/WorkflowUtils.go b/pkg/pipeline/WorkflowUtils.go index b8317a2f98..e08da6989e 100644 --- a/pkg/pipeline/WorkflowUtils.go +++ b/pkg/pipeline/WorkflowUtils.go @@ -694,7 +694,7 @@ func (workflowRequest *WorkflowRequest) getWorkflowImage() string { return "" } } -func (workflowRequest *WorkflowRequest) GetWorkflowMainContainer(config *CiCdConfig, workflowJson []byte, workflowTemplate *bean.WorkflowTemplate, workflowConfigMaps []bean2.ConfigSecretMap, workflowSecrets []bean2.ConfigSecretMap) (v12.Container, error) { +func (workflowRequest *WorkflowRequest) GetWorkflowMainContainer(config *CiCdConfig, workflowJson []byte, workflowTemplate bean.WorkflowTemplate, workflowConfigMaps []bean2.ConfigSecretMap, workflowSecrets []bean2.ConfigSecretMap) v12.Container { privileged := true pvc := workflowRequest.getPVCForWorkflowRequest() containerEnvVariables := workflowRequest.getContainerEnvVariables(config, workflowJson) @@ -714,12 +714,7 @@ func (workflowRequest *WorkflowRequest) GetWorkflowMainContainer(config *CiCdCon Name: "app-data", ContainerPort: 9102, }} - err := updateVolumeMountsForCi(config, workflowTemplate, 
&workflowMainContainer) - if err != nil { - return workflowMainContainer, err - } } - if len(pvc) != 0 { buildPvcCachePath := config.BuildPvcCachePath buildxPvcCachePath := config.BuildxPvcCachePath @@ -749,7 +744,7 @@ func (workflowRequest *WorkflowRequest) GetWorkflowMainContainer(config *CiCdCon }) } UpdateContainerEnvsFromCmCs(&workflowMainContainer, workflowConfigMaps, workflowSecrets) - return workflowMainContainer, nil + return workflowMainContainer } func CheckIfReTriggerRequired(status, message, workflowRunnerStatus string) bool { @@ -757,13 +752,3 @@ func CheckIfReTriggerRequired(status, message, workflowRunnerStatus string) bool message == POD_DELETED_MESSAGE) && workflowRunnerStatus != WorkflowCancel } - -func updateVolumeMountsForCi(config *CiCdConfig, workflowTemplate *bean.WorkflowTemplate, workflowMainContainer *v12.Container) error { - volume, volumeMounts, err := config.GetWorkflowVolumeAndVolumeMounts() - if err != nil { - return err - } - workflowTemplate.Volumes = volume - workflowMainContainer.VolumeMounts = volumeMounts - return nil -} diff --git a/pkg/pipeline/bean/CustomTagService.go b/pkg/pipeline/bean/CustomTagService.go deleted file mode 100644 index b823de3aed..0000000000 --- a/pkg/pipeline/bean/CustomTagService.go +++ /dev/null @@ -1,25 +0,0 @@ -package bean - -import "fmt" - -const ( - EntityNull = iota - EntityTypeCiPipelineId -) - -const ( - ImagePathPattern = "%s/%s:%s" // dockerReg/dockerRepo:Tag - ImageTagUnavailableMessage = "Desired image tag already exists" - REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS = `\{.{2,}\}` - REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x = `\{[^xX]|{}\}` - REGEX_PATTERN_FOR_IMAGE_TAG = `^[a-zA-Z0-9]+[a-zA-Z0-9._-]*$` -) - -var ( - ErrImagePathInUse = fmt.Errorf(ImageTagUnavailableMessage) -) - -const ( - IMAGE_TAG_VARIABLE_NAME_X = "{X}" - IMAGE_TAG_VARIABLE_NAME_x = "{x}" -) diff --git a/pkg/pipeline/history/DeployedConfigurationHistoryService.go 
b/pkg/pipeline/history/DeployedConfigurationHistoryService.go index 7e35bc4a22..b7c2565d06 100644 --- a/pkg/pipeline/history/DeployedConfigurationHistoryService.go +++ b/pkg/pipeline/history/DeployedConfigurationHistoryService.go @@ -1,7 +1,6 @@ package history import ( - "context" "errors" "fmt" "github.com/devtron-labs/devtron/api/bean" @@ -15,9 +14,9 @@ import ( type DeployedConfigurationHistoryService interface { GetDeployedConfigurationByWfrId(pipelineId, wfrId int) ([]*DeploymentConfigurationDto, error) GetDeployedHistoryComponentList(pipelineId, baseConfigId int, historyComponent, historyComponentName string) ([]*DeployedHistoryComponentMetadataDto, error) - GetDeployedHistoryComponentDetail(ctx context.Context, pipelineId, id int, historyComponent, historyComponentName string, userHasAdminAccess bool) (*HistoryDetailDto, error) - GetAllDeployedConfigurationByPipelineIdAndLatestWfrId(ctx context.Context, pipelineId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) - GetAllDeployedConfigurationByPipelineIdAndWfrId(ctx context.Context, pipelineId, wfrId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) + GetDeployedHistoryComponentDetail(pipelineId, id int, historyComponent, historyComponentName string, userHasAdminAccess bool) (*HistoryDetailDto, error) + GetAllDeployedConfigurationByPipelineIdAndLatestWfrId(pipelineId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) + GetAllDeployedConfigurationByPipelineIdAndWfrId(pipelineId, wfrId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) } type DeployedConfigurationHistoryServiceImpl struct { @@ -126,11 +125,11 @@ func (impl *DeployedConfigurationHistoryServiceImpl) GetDeployedHistoryComponent return historyList, nil } -func (impl *DeployedConfigurationHistoryServiceImpl) GetDeployedHistoryComponentDetail(ctx context.Context, pipelineId, id int, historyComponent, historyComponentName string, 
userHasAdminAccess bool) (*HistoryDetailDto, error) { +func (impl *DeployedConfigurationHistoryServiceImpl) GetDeployedHistoryComponentDetail(pipelineId, id int, historyComponent, historyComponentName string, userHasAdminAccess bool) (*HistoryDetailDto, error) { history := &HistoryDetailDto{} var err error if historyComponent == string(DEPLOYMENT_TEMPLATE_TYPE_HISTORY_COMPONENT) { - history, err = impl.deploymentTemplateHistoryService.GetHistoryForDeployedTemplateById(ctx, id, pipelineId) + history, err = impl.deploymentTemplateHistoryService.GetHistoryForDeployedTemplateById(id, pipelineId) } else if historyComponent == string(PIPELINE_STRATEGY_TYPE_HISTORY_COMPONENT) { history, err = impl.strategyHistoryService.GetHistoryForDeployedStrategyById(id, pipelineId) } else if historyComponent == string(CONFIGMAP_TYPE_HISTORY_COMPONENT) { @@ -147,14 +146,14 @@ func (impl *DeployedConfigurationHistoryServiceImpl) GetDeployedHistoryComponent return history, nil } -func (impl *DeployedConfigurationHistoryServiceImpl) GetAllDeployedConfigurationByPipelineIdAndLatestWfrId(ctx context.Context, pipelineId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) { +func (impl *DeployedConfigurationHistoryServiceImpl) GetAllDeployedConfigurationByPipelineIdAndLatestWfrId(pipelineId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) { //getting latest wfr from pipelineId wfr, err := impl.cdWorkflowRepository.FindLastStatusByPipelineIdAndRunnerType(pipelineId, bean.CD_WORKFLOW_TYPE_DEPLOY) if err != nil { impl.logger.Errorw("error in getting latest deploy stage wfr by pipelineId", "err", err, "pipelineId", pipelineId) return nil, err } - deployedConfig, err := impl.GetAllDeployedConfigurationByPipelineIdAndWfrId(ctx, pipelineId, wfr.Id, userHasAdminAccess) + deployedConfig, err := impl.GetAllDeployedConfigurationByPipelineIdAndWfrId(pipelineId, wfr.Id, userHasAdminAccess) if err != nil { impl.logger.Errorw("error in getting 
GetAllDeployedConfigurationByPipelineIdAndWfrId", "err", err, "pipelineID", pipelineId, "wfrId", wfr.Id) return nil, err @@ -162,9 +161,9 @@ func (impl *DeployedConfigurationHistoryServiceImpl) GetAllDeployedConfiguration deployedConfig.WfrId = wfr.Id return deployedConfig, nil } -func (impl *DeployedConfigurationHistoryServiceImpl) GetAllDeployedConfigurationByPipelineIdAndWfrId(ctx context.Context, pipelineId, wfrId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) { +func (impl *DeployedConfigurationHistoryServiceImpl) GetAllDeployedConfigurationByPipelineIdAndWfrId(pipelineId, wfrId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) { //getting history of deployment template for latest deployment - deploymentTemplateHistory, err := impl.deploymentTemplateHistoryService.GetDeployedHistoryByPipelineIdAndWfrId(ctx, pipelineId, wfrId) + deploymentTemplateHistory, err := impl.deploymentTemplateHistoryService.GetDeployedHistoryByPipelineIdAndWfrId(pipelineId, wfrId) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting deployment template history by pipelineId and wfrId", "err", err, "pipelineId", pipelineId, "wfrId", wfrId) return nil, err diff --git a/pkg/pipeline/history/DeploymentTemplateHistoryService.go b/pkg/pipeline/history/DeploymentTemplateHistoryService.go index 4dab5399be..7544df13b2 100644 --- a/pkg/pipeline/history/DeploymentTemplateHistoryService.go +++ b/pkg/pipeline/history/DeploymentTemplateHistoryService.go @@ -1,7 +1,6 @@ package history import ( - "context" "encoding/json" repository2 "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" @@ -13,7 +12,6 @@ import ( "github.com/devtron-labs/devtron/pkg/variables" "github.com/devtron-labs/devtron/pkg/variables/parsers" repository6 
"github.com/devtron-labs/devtron/pkg/variables/repository" - "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" "go.uber.org/zap" "time" @@ -25,12 +23,12 @@ type DeploymentTemplateHistoryService interface { CreateDeploymentTemplateHistoryForDeploymentTrigger(pipeline *pipelineConfig.Pipeline, envOverride *chartConfig.EnvConfigOverride, renderedImageTemplate string, deployedOn time.Time, deployedBy int32) (*repository.DeploymentTemplateHistory, error) GetDeploymentDetailsForDeployedTemplateHistory(pipelineId, offset, limit int) ([]*DeploymentTemplateHistoryDto, error) - GetHistoryForDeployedTemplateById(ctx context.Context, id int, pipelineId int) (*HistoryDetailDto, error) + GetHistoryForDeployedTemplateById(id, pipelineId int) (*HistoryDetailDto, error) CheckIfHistoryExistsForPipelineIdAndWfrId(pipelineId, wfrId int) (historyId int, exists bool, err error) GetDeployedHistoryList(pipelineId, baseConfigId int) ([]*DeployedHistoryComponentMetadataDto, error) // used for rollback - GetDeployedHistoryByPipelineIdAndWfrId(ctx context.Context, pipelineId, wfrId int) (*HistoryDetailDto, error) + GetDeployedHistoryByPipelineIdAndWfrId(pipelineId, wfrId int) (*HistoryDetailDto, error) } type DeploymentTemplateHistoryServiceImpl struct { @@ -45,7 +43,6 @@ type DeploymentTemplateHistoryServiceImpl struct { cdWorkflowRepository pipelineConfig.CdWorkflowRepository variableSnapshotHistoryService variables.VariableSnapshotHistoryService variableTemplateParser parsers.VariableTemplateParser - scopedVariableService variables.ScopedVariableService } func NewDeploymentTemplateHistoryServiceImpl(logger *zap.SugaredLogger, deploymentTemplateHistoryRepository repository.DeploymentTemplateHistoryRepository, @@ -57,9 +54,7 @@ func NewDeploymentTemplateHistoryServiceImpl(logger *zap.SugaredLogger, deployme userService user.UserService, cdWorkflowRepository pipelineConfig.CdWorkflowRepository, variableSnapshotHistoryService 
variables.VariableSnapshotHistoryService, - variableTemplateParser parsers.VariableTemplateParser, - scopedVariableService variables.ScopedVariableService, -) *DeploymentTemplateHistoryServiceImpl { + variableTemplateParser parsers.VariableTemplateParser) *DeploymentTemplateHistoryServiceImpl { return &DeploymentTemplateHistoryServiceImpl{ logger: logger, deploymentTemplateHistoryRepository: deploymentTemplateHistoryRepository, @@ -72,7 +67,6 @@ func NewDeploymentTemplateHistoryServiceImpl(logger *zap.SugaredLogger, deployme cdWorkflowRepository: cdWorkflowRepository, variableSnapshotHistoryService: variableSnapshotHistoryService, variableTemplateParser: variableTemplateParser, - scopedVariableService: scopedVariableService, } } @@ -318,7 +312,7 @@ func (impl DeploymentTemplateHistoryServiceImpl) CheckIfHistoryExistsForPipeline return history.Id, true, nil } -func (impl DeploymentTemplateHistoryServiceImpl) GetDeployedHistoryByPipelineIdAndWfrId(ctx context.Context, pipelineId, wfrId int) (*HistoryDetailDto, error) { +func (impl DeploymentTemplateHistoryServiceImpl) GetDeployedHistoryByPipelineIdAndWfrId(pipelineId, wfrId int) (*HistoryDetailDto, error) { impl.logger.Debugw("received request, GetDeployedHistoryByPipelineIdAndWfrId", "pipelineId", pipelineId, "wfrId", wfrId) //checking if history exists for pipelineId and wfrId @@ -328,14 +322,10 @@ func (impl DeploymentTemplateHistoryServiceImpl) GetDeployedHistoryByPipelineIdA return nil, err } - isSuperAdmin, err := util.GetIsSuperAdminFromContext(ctx) + variableSnapshotMap, err := impl.getVariableSnapshot(history.Id) if err != nil { return nil, err } - variableSnapshotMap, resolvedTemplate, err := impl.getVariableSnapshotAndResolveTemplate(history.Template, history.Id, isSuperAdmin) - if err != nil { - impl.logger.Errorw("error while resolving template from history", "err", err, "wfrId", wfrId, "pipelineID", pipelineId) - } historyDto := &HistoryDetailDto{ TemplateName: history.TemplateName, @@ -345,53 +335,28 
@@ func (impl DeploymentTemplateHistoryServiceImpl) GetDeployedHistoryByPipelineIdA DisplayName: "values.yaml", Value: history.Template, }, - VariableSnapshot: variableSnapshotMap, - ResolvedTemplateData: resolvedTemplate, + VariableSnapshot: variableSnapshotMap, } return historyDto, nil } -func (impl DeploymentTemplateHistoryServiceImpl) getVariableSnapshotAndResolveTemplate(template string, historyId int, isSuperAdmin bool) (map[string]string, string, error) { +func (impl DeploymentTemplateHistoryServiceImpl) getVariableSnapshot(historyId int) (map[string]string, error) { reference := repository6.HistoryReference{ HistoryReferenceId: historyId, HistoryReferenceType: repository6.HistoryReferenceTypeDeploymentTemplate, } - variableSnapshotMap := make(map[string]string) references, err := impl.variableSnapshotHistoryService.GetVariableHistoryForReferences([]repository6.HistoryReference{reference}) if err != nil { - return variableSnapshotMap, template, err + return nil, err } - + variableSnapshotMap := make(map[string]string) if _, ok := references[reference]; ok { err = json.Unmarshal(references[reference].VariableSnapshot, &variableSnapshotMap) if err != nil { - return variableSnapshotMap, template, err + return nil, err } } - - if len(variableSnapshotMap) == 0 { - return variableSnapshotMap, template, err - } - - varNames := make([]string, 0) - for varName, _ := range variableSnapshotMap { - varNames = append(varNames, varName) - } - varNameToIsSensitive, err := impl.scopedVariableService.CheckForSensitiveVariables(varNames) - if err != nil { - return variableSnapshotMap, template, err - } - - scopedVariableData := parsers.GetScopedVarData(variableSnapshotMap, varNameToIsSensitive, isSuperAdmin) - request := parsers.VariableParserRequest{Template: template, TemplateType: parsers.JsonVariableTemplate, Variables: scopedVariableData, IgnoreUnknownVariables: true} - parserResponse := impl.variableTemplateParser.ParseTemplate(request) - err = parserResponse.Error - if 
err != nil { - return variableSnapshotMap, template, err - } - resolvedTemplate := parserResponse.ResolvedTemplate - - return variableSnapshotMap, resolvedTemplate, nil + return variableSnapshotMap, nil } func (impl DeploymentTemplateHistoryServiceImpl) GetDeployedHistoryList(pipelineId, baseConfigId int) ([]*DeployedHistoryComponentMetadataDto, error) { @@ -415,21 +380,29 @@ func (impl DeploymentTemplateHistoryServiceImpl) GetDeployedHistoryList(pipeline return historyList, nil } -func (impl DeploymentTemplateHistoryServiceImpl) GetHistoryForDeployedTemplateById(ctx context.Context, id int, pipelineId int) (*HistoryDetailDto, error) { +func (impl DeploymentTemplateHistoryServiceImpl) GetHistoryForDeployedTemplateById(id, pipelineId int) (*HistoryDetailDto, error) { history, err := impl.deploymentTemplateHistoryRepository.GetHistoryForDeployedTemplateById(id, pipelineId) if err != nil { impl.logger.Errorw("error in getting deployment template history", "err", err, "id", id, "pipelineId", pipelineId) return nil, err } - isSuperAdmin, err := util.GetIsSuperAdminFromContext(ctx) + variableSnapshotMap, err := impl.getVariableSnapshot(history.Id) if err != nil { return nil, err } - variableSnapshotMap, resolvedTemplate, err := impl.getVariableSnapshotAndResolveTemplate(history.Template, history.Id, isSuperAdmin) - if err != nil { - impl.logger.Errorw("error while resolving template from history", "err", err, "id", id, "pipelineID", pipelineId) + resolvedTemplate := history.Template + if len(variableSnapshotMap) > 0 { + scopedVariableData := parsers.GetScopedVarData(variableSnapshotMap) + request := parsers.VariableParserRequest{Template: history.Template, TemplateType: parsers.JsonVariableTemplate, Variables: scopedVariableData, IgnoreUnknownVariables: true} + parserResponse := impl.variableTemplateParser.ParseTemplate(request) + err = parserResponse.Error + if err != nil { + return nil, err + } + resolvedTemplate = parserResponse.ResolvedTemplate } + historyDto := 
&HistoryDetailDto{ TemplateName: history.TemplateName, TemplateVersion: history.TemplateVersion, @@ -438,8 +411,8 @@ func (impl DeploymentTemplateHistoryServiceImpl) GetHistoryForDeployedTemplateBy DisplayName: "values.yaml", Value: history.Template, }, - VariableSnapshot: variableSnapshotMap, - ResolvedTemplateData: resolvedTemplate, + VariableSnapshot: variableSnapshotMap, + ResolvedTemplate: resolvedTemplate, } return historyDto, nil } diff --git a/pkg/pipeline/history/bean.go b/pkg/pipeline/history/bean.go index 740d2747e4..80392c60c3 100644 --- a/pkg/pipeline/history/bean.go +++ b/pkg/pipeline/history/bean.go @@ -50,16 +50,16 @@ type HistoryDetailDto struct { PipelineTriggerType pipelineConfig.TriggerType `json:"pipelineTriggerType,omitempty"` Strategy string `json:"strategy,omitempty"` //for configmap and secret - Type string `json:"type,omitempty"` - External *bool `json:"external,omitempty"` - MountPath string `json:"mountPath,omitempty"` - ExternalSecretType string `json:"externalType,omitempty"` - RoleARN string `json:"roleARN,omitempty"` - SubPath *bool `json:"subPath,omitempty"` - FilePermission string `json:"filePermission,omitempty"` - CodeEditorValue *HistoryDetailConfig `json:"codeEditorValue"` - VariableSnapshot map[string]string `json:"variableSnapshot"` - ResolvedTemplateData string `json:"resolvedTemplateData"` + Type string `json:"type,omitempty"` + External *bool `json:"external,omitempty"` + MountPath string `json:"mountPath,omitempty"` + ExternalSecretType string `json:"externalType,omitempty"` + RoleARN string `json:"roleARN,omitempty"` + SubPath *bool `json:"subPath,omitempty"` + FilePermission string `json:"filePermission,omitempty"` + CodeEditorValue *HistoryDetailConfig `json:"codeEditorValue"` + VariableSnapshot map[string]string `json:"variableSnapshot"` + ResolvedTemplate string `json:"-"` } type HistoryDetailConfig struct { diff --git a/pkg/pipeline/repository/PipelineStageRepository.go 
b/pkg/pipeline/repository/PipelineStageRepository.go index 360a69840b..56ce81b330 100644 --- a/pkg/pipeline/repository/PipelineStageRepository.go +++ b/pkg/pipeline/repository/PipelineStageRepository.go @@ -29,7 +29,7 @@ const ( PIPELINE_STAGE_STEP_VARIABLE_VALUE_TYPE_GLOBAL PipelineStageStepVariableValueType = "GLOBAL" PIPELINE_STAGE_STEP_CONDITION_TYPE_SKIP PipelineStageStepConditionType = "SKIP" PIPELINE_STAGE_STEP_CONDITION_TYPE_TRIGGER PipelineStageStepConditionType = "TRIGGER" - PIPELINE_STAGE_STEP_CONDITION_TYPE_SUCCESS PipelineStageStepConditionType = "PASS" + PIPELINE_STAGE_STEP_CONDITION_TYPE_SUCCESS PipelineStageStepConditionType = "SUCCESS" PIPELINE_STAGE_STEP_CONDITION_TYPE_FAIL PipelineStageStepConditionType = "FAIL" PIPELINE_STAGE_STEP_VARIABLE_FORMAT_TYPE_STRING PipelineStageStepVariableFormatType = "STRING" PIPELINE_STAGE_STEP_VARIABLE_FORMAT_TYPE_NUMBER PipelineStageStepVariableFormatType = "NUMBER" diff --git a/pkg/resourceQualifiers/bean.go b/pkg/resourceQualifiers/bean.go index 09a235e226..fc6fe6e431 100644 --- a/pkg/resourceQualifiers/bean.go +++ b/pkg/resourceQualifiers/bean.go @@ -13,7 +13,6 @@ type SystemMetadata struct { ClusterName string Namespace string ImageTag string - Image string AppName string } @@ -29,8 +28,6 @@ func (metadata *SystemMetadata) GetDataFromSystemVariable(variable SystemVariabl return metadata.ImageTag case DevtronAppName: return metadata.AppName - case DevtronImage: - return metadata.Image } return "" } diff --git a/pkg/resourceQualifiers/constants.go b/pkg/resourceQualifiers/constants.go index cf0644e782..f3541b84e0 100644 --- a/pkg/resourceQualifiers/constants.go +++ b/pkg/resourceQualifiers/constants.go @@ -7,15 +7,7 @@ const ( DevtronClusterName SystemVariableName = "DEVTRON_CLUSTER_NAME" DevtronEnvName SystemVariableName = "DEVTRON_ENV_NAME" DevtronImageTag SystemVariableName = "DEVTRON_IMAGE_TAG" - DevtronImage SystemVariableName = "DEVTRON_IMAGE" DevtronAppName SystemVariableName = "DEVTRON_APP_NAME" ) -var 
SystemVariables = []SystemVariableName{ - DevtronNamespace, - DevtronClusterName, - DevtronEnvName, - DevtronImageTag, - DevtronAppName, - DevtronImage, -} +var SystemVariables = []SystemVariableName{DevtronNamespace, DevtronClusterName, DevtronEnvName, DevtronImageTag, DevtronAppName} diff --git a/pkg/user/casbin/rbac.go b/pkg/user/casbin/rbac.go index edc050169e..189f6a3505 100644 --- a/pkg/user/casbin/rbac.go +++ b/pkg/user/casbin/rbac.go @@ -115,7 +115,8 @@ func (e *EnforcerImpl) Enforce(token string, resource string, action string, res } func (e *EnforcerImpl) EnforceByEmail(emailId string, resource string, action string, resourceItem string) bool { - return e.enforceByEmail(emailId, resource, action, strings.ToLower(resourceItem)) + allowed := e.enforceByEmail(emailId, resource, action, resourceItem) + return allowed } func (e *EnforcerImpl) ReloadPolicy() error { @@ -166,10 +167,6 @@ func (e *EnforcerImpl) EnforceByEmailInBatch(emailId string, resource string, ac batchRequestLock.Lock() defer batchRequestLock.Unlock() - for index, val := range vals { - vals[index] = strings.ToLower(val) - } - var metrics = make(map[int]int64) result, notFoundItemList := e.batchEnforceFromCache(emailId, resource, action, vals) if len(result) > 0 { diff --git a/pkg/util/artifact-utils.go b/pkg/util/artifact-utils.go deleted file mode 100644 index 7841d6e117..0000000000 --- a/pkg/util/artifact-utils.go +++ /dev/null @@ -1,12 +0,0 @@ -package util - -import "strings" - -func GetImageTagFromImage(image string) string { - parts := strings.Split(image, ":") - - if len(parts) < 1 { - return "" - } - return parts[len(parts)-1] -} diff --git a/pkg/variables/ScopedVariableService.go b/pkg/variables/ScopedVariableService.go index 03de59e610..692d1b42f6 100644 --- a/pkg/variables/ScopedVariableService.go +++ b/pkg/variables/ScopedVariableService.go @@ -2,7 +2,6 @@ package variables import ( "fmt" - "github.com/argoproj/argo-workflows/v3/errors" 
"github.com/caarlos0/env" "github.com/devtron-labs/devtron/pkg/devtronResource" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" @@ -23,10 +22,8 @@ import ( type ScopedVariableService interface { CreateVariables(payload models.Payload) error - GetScopedVariables(scope resourceQualifiers.Scope, varNames []string, unmaskSensitiveData bool) (scopedVariableDataObj []*models.ScopedVariableData, err error) + GetScopedVariables(scope resourceQualifiers.Scope, varNames []string, maskSensitiveData bool) (scopedVariableDataObj []*models.ScopedVariableData, err error) GetJsonForVariables() (*models.Payload, error) - CheckForSensitiveVariables(variableNames []string) (map[string]bool, error) - GetFormattedVariableForName(name string) string } type ScopedVariableServiceImpl struct { @@ -59,7 +56,6 @@ type VariableConfig struct { VariableNameRegex string `env:"SCOPED_VARIABLE_NAME_REGEX" envDefault:"^[a-zA-Z][a-zA-Z0-9_-]{0,62}[a-zA-Z0-9]$"` VariableCacheEnabled bool `env:"VARIABLE_CACHE_ENABLED" envDefault:"true"` SystemVariablePrefix string `env:"SYSTEM_VAR_PREFIX" envDefault:"DEVTRON_"` - ScopedVariableFormat string `env:"SCOPED_VARIABLE_FORMAT" envDefault:"@{{%s}}"` } func loadVariableCache(cfg *VariableConfig, service *ScopedVariableServiceImpl) { @@ -90,43 +86,6 @@ func (impl *ScopedVariableServiceImpl) loadVarCache() { impl.logger.Info("variable cache loaded successfully") } -func (impl *ScopedVariableServiceImpl) GetFormattedVariableForName(name string) string { - return fmt.Sprintf(impl.VariableNameConfig.ScopedVariableFormat, name) -} - -func (impl *ScopedVariableServiceImpl) CheckForSensitiveVariables(variableNames []string) (map[string]bool, error) { - - // getting all variables from cache - allVariableDefinitions := impl.VariableCache.GetData() - - var err error - // cache is not loaded get from repo - if allVariableDefinitions == nil { - allVariableDefinitions, err = 
impl.scopedVariableRepository.GetVariableTypeForVariableNames(variableNames) - if err != nil { - return nil, errors.Wrap(err, "400", "error in fetching variable type") - } - } - - variableNameToType := make(map[string]models.VariableType) - for _, definition := range allVariableDefinitions { - variableNameToType[definition.Name] = definition.VarType - } - - varNameToIsSensitive := make(map[string]bool) - for _, name := range variableNames { - - // by default all variables are marked sensitive to handle deleted variables - // only super admin will be able to see the values once variable is deleted from system - if varType, ok := variableNameToType[name]; ok { - varNameToIsSensitive[name] = varType.IsTypeSensitive() - } else { - varNameToIsSensitive[name] = true - } - } - return varNameToIsSensitive, nil -} - func (impl *ScopedVariableServiceImpl) CreateVariables(payload models.Payload) error { err, _ := impl.isValidPayload(payload) if err != nil { @@ -360,7 +319,7 @@ func (impl *ScopedVariableServiceImpl) selectScopeForCompoundQualifier(scopes [] return selectedParentScope } -func (impl *ScopedVariableServiceImpl) GetScopedVariables(scope resourceQualifiers.Scope, varNames []string, unmaskSensitiveData bool) (scopedVariableDataObj []*models.ScopedVariableData, err error) { +func (impl *ScopedVariableServiceImpl) GetScopedVariables(scope resourceQualifiers.Scope, varNames []string, maskSensitiveData bool) (scopedVariableDataObj []*models.ScopedVariableData, err error) { //populating system variables from system metadata var systemVariableData, allSystemVariables []*models.ScopedVariableData @@ -377,7 +336,7 @@ func (impl *ScopedVariableServiceImpl) GetScopedVariables(scope resourceQualifie return scopedVariableDataObj, nil } - // Cache is not loaded + // Need to get from repo for isSensitive even if cache is loaded since cache only contains metadata if allVariableDefinitions == nil { allVariableDefinitions, err = impl.scopedVariableRepository.GetAllVariables() @@ 
-397,22 +356,18 @@ func (impl *ScopedVariableServiceImpl) GetScopedVariables(scope resourceQualifie } variableIds := make([]int, 0) + variableIdToDefinition := make(map[int]*repository2.VariableDefinition) for _, definition := range variableDefinitions { variableIds = append(variableIds, definition.Id) + variableIdToDefinition[definition.Id] = definition } + // This to prevent corner case where no variables were found for the provided names if len(varNames) > 0 && len(variableIds) == 0 { return scopedVariableDataObj, nil } - allVariableIds := make([]int, 0) - variableIdToDefinition := make(map[int]*repository2.VariableDefinition) - for _, definition := range allVariableDefinitions { - allVariableIds = append(allVariableIds, definition.Id) - variableIdToDefinition[definition.Id] = definition - } - - varScope, err := impl.qualifierMappingService.GetQualifierMappings(resourceQualifiers.Variable, &scope, allVariableIds) + varScope, err := impl.qualifierMappingService.GetQualifierMappings(resourceQualifiers.Variable, &scope, variableIds) if err != nil { impl.logger.Errorw("error in getting varScope", "err", err) return nil, err @@ -450,8 +405,8 @@ func (impl *ScopedVariableServiceImpl) GetScopedVariables(scope resourceQualifie var varValue *models.VariableValue var isRedacted bool - if !unmaskSensitiveData && variableIdToDefinition[varId].VarType == models.PRIVATE { - varValue = &models.VariableValue{Value: models.HiddenValue} + if !maskSensitiveData && variableIdToDefinition[varId].VarType == models.PRIVATE { + varValue = &models.VariableValue{Value: ""} isRedacted = true } else { varValue = &models.VariableValue{Value: value} @@ -465,30 +420,20 @@ func (impl *ScopedVariableServiceImpl) GetScopedVariables(scope resourceQualifie scopedVariableDataObj = append(scopedVariableDataObj, scopedVariableData) } - allScopedVariableDataObj := scopedVariableDataObj - usedScopedVariableDataObj := make([]*models.ScopedVariableData, 0) - for _, data := range scopedVariableDataObj { - 
if varNames == nil || slices.Contains(varNames, data.VariableName) { - usedScopedVariableDataObj = append(usedScopedVariableDataObj, data) - } - } - //adding variable def for variables which don't have any scoped data defined // This only happens when passed var names is null (called from UI to get all variables with or without data) if varNames == nil { for _, definition := range allVariableDefinitions { if !slices.Contains(foundVarIds, definition.Id) { - usedScopedVariableDataObj = append(usedScopedVariableDataObj, &models.ScopedVariableData{ + scopedVariableDataObj = append(scopedVariableDataObj, &models.ScopedVariableData{ VariableName: definition.Name, ShortDescription: definition.ShortDescription, }) } } } - - allScopedVariableDataObj = append(allScopedVariableDataObj, allSystemVariables...) - impl.deduceVariables(usedScopedVariableDataObj, allScopedVariableDataObj) - return usedScopedVariableDataObj, err + impl.deduceVariables(scopedVariableDataObj, allSystemVariables) + return scopedVariableDataObj, err } func resolveExpressionWithVariableValues(expr string, varNameToData map[string]*models.ScopedVariableData) (string, error) { diff --git a/pkg/variables/ScopedVariableValidator.go b/pkg/variables/ScopedVariableValidator.go index 8bb674b960..cffe5983ab 100644 --- a/pkg/variables/ScopedVariableValidator.go +++ b/pkg/variables/ScopedVariableValidator.go @@ -30,11 +30,6 @@ func (impl *ScopedVariableServiceImpl) isValidPayload(payload models.Payload) (e variableNamesList = append(variableNamesList, variable.Definition.VarName) uniqueVariableMap := make(map[string]interface{}) for _, attributeValue := range variable.AttributeValues { - - if !utils.IsStringType(attributeValue.VariableValue.Value) && variable.Definition.VarType.IsTypeSensitive() { - return models.ValidationError{Err: fmt.Errorf("data type other than string cannot be sensitive")}, false - } - validIdentifierTypeList := helper.GetIdentifierTypeFromAttributeType(attributeValue.AttributeType) if 
len(validIdentifierTypeList) != len(attributeValue.AttributeParams) { return models.ValidationError{Err: fmt.Errorf("attribute selectors are not valid for given category %s", attributeValue.AttributeType)}, false diff --git a/pkg/variables/models/variable-payload.go b/pkg/variables/models/variable-payload.go index d5a9885afb..3ee5e522b6 100644 --- a/pkg/variables/models/variable-payload.go +++ b/pkg/variables/models/variable-payload.go @@ -42,9 +42,6 @@ const ( PRIMITIVE_TYPE DataType = "primitive" ) -const HiddenValue = "hidden-value" -const UndefinedValue = "undefined-variable-value" - func (variableType VariableType) IsTypeSensitive() bool { if variableType == PRIVATE { return true @@ -77,17 +74,3 @@ func (value VariableValue) StringValue() string { } return value.Value.(string) } - -func GetInterfacedValue(input string) interface{} { - var interfaceValue interface{} - if intValue, err := strconv.Atoi(input); err == nil { - interfaceValue = intValue - } else if floatValue, err := strconv.ParseFloat(input, 64); err == nil { - interfaceValue = floatValue - } else if boolValue, err := strconv.ParseBool(input); err == nil { - interfaceValue = boolValue - } else { - interfaceValue = input - } - return interfaceValue -} diff --git a/pkg/variables/parsers/VariableTemplateParser.go b/pkg/variables/parsers/VariableTemplateParser.go index 6fab245a6c..a745e18441 100644 --- a/pkg/variables/parsers/VariableTemplateParser.go +++ b/pkg/variables/parsers/VariableTemplateParser.go @@ -40,14 +40,12 @@ func NewVariableTemplateParserImpl(logger *zap.SugaredLogger) (*VariableTemplate return impl, nil } -type VariableTemplateParserConfig struct { - ScopedVariableEnabled bool `env:"SCOPED_VARIABLE_ENABLED" envDefault:"false"` - ScopedVariableHandlePrimitives bool `env:"SCOPED_VARIABLE_HANDLE_PRIMITIVES" envDefault:"false"` - VariableExpressionRegex string `env:"VARIABLE_EXPRESSION_REGEX" envDefault:"@{{([^}]+)}}"` -} +const VariableRegex = `@\{\{[a-zA-Z0-9-+/*%_\s]+\}\}` +const 
VariableSubRegexWithQuotes = `\"@{{([a-zA-Z0-9-+/*%_\s]+)}}\"` -func (cfg VariableTemplateParserConfig) isScopedVariablesDisabled() bool { - return !cfg.ScopedVariableEnabled +type VariableTemplateParserConfig struct { + ScopedVariableEnabled bool `env:"SCOPED_VARIABLE_ENABLED" envDefault:"false"` + ScopedVariableHandlePrimitives bool `env:"SCOPED_VARIABLE_HANDLE_PRIMITIVES" envDefault:"false"` } func getVariableTemplateParserConfig() (*VariableTemplateParserConfig, error) { @@ -56,22 +54,10 @@ func getVariableTemplateParserConfig() (*VariableTemplateParserConfig, error) { return cfg, err } -func getRegexSubMatches(regex string, input string) [][]string { - re := regexp.MustCompile(regex) - matches := re.FindAllStringSubmatch(input, -1) - return matches -} - -const quote = "\"" -const escapedQuote = `\\"` - -func (impl *VariableTemplateParserImpl) preProcessPlaceholder(template string, variableValueMap map[string]interface{}) string { +func preProcessPlaceholder(template string, variableValueMap map[string]interface{}) string { - variableSubRegexWithQuotes := quote + impl.variableTemplateParserConfig.VariableExpressionRegex + quote - variableSubRegexWithEscapedQuotes := escapedQuote + impl.variableTemplateParserConfig.VariableExpressionRegex + escapedQuote - - matches := getRegexSubMatches(variableSubRegexWithQuotes, template) - matches = append(matches, getRegexSubMatches(variableSubRegexWithEscapedQuotes, template)...) 
+ re := regexp.MustCompile(VariableSubRegexWithQuotes) + matches := re.FindAllStringSubmatch(template, -1) // Replace the surrounding quotes for variables whose value is known // and type is primitive @@ -90,23 +76,30 @@ func (impl *VariableTemplateParserImpl) preProcessPlaceholder(template string, v func (impl *VariableTemplateParserImpl) ParseTemplate(parserRequest VariableParserRequest) VariableParserResponse { - if impl.variableTemplateParserConfig.isScopedVariablesDisabled() { - return parserRequest.GetEmptyResponse() - } - request := parserRequest - if impl.handlePrimitivesForJson(parserRequest) { - variableToValue := parserRequest.GetOriginalValuesMap() - template := impl.preProcessPlaceholder(parserRequest.Template, variableToValue) - - //overriding request to handle primitives in json request - request.TemplateType = StringVariableTemplate - request.Template = template + if !impl.variableTemplateParserConfig.ScopedVariableEnabled { + return VariableParserResponse{ + Request: parserRequest, + ResolvedTemplate: parserRequest.Template, + } } - return impl.parseTemplate(request) -} -func (impl *VariableTemplateParserImpl) handlePrimitivesForJson(parserRequest VariableParserRequest) bool { - return impl.variableTemplateParserConfig.ScopedVariableHandlePrimitives && parserRequest.TemplateType == JsonVariableTemplate + if impl.variableTemplateParserConfig.ScopedVariableHandlePrimitives && parserRequest.TemplateType == JsonVariableTemplate { + + var variableToValue = make(map[string]interface{}, 0) + for _, variable := range parserRequest.Variables { + variableToValue[variable.VariableName] = variable.VariableValue.Value + } + template := preProcessPlaceholder(parserRequest.Template, variableToValue) + request := VariableParserRequest{ + TemplateType: StringVariableTemplate, + Template: template, + Variables: parserRequest.Variables, + IgnoreUnknownVariables: parserRequest.IgnoreUnknownVariables, + } + return impl.parseTemplate(request) + } else { + return 
impl.parseTemplate(parserRequest) + } } func (impl *VariableTemplateParserImpl) ExtractVariables(template string, templateType VariableTemplateType) ([]string, error) { @@ -282,7 +275,6 @@ func (impl *VariableTemplateParserImpl) getDefaultMappedFunc() map[string]functi "upper": stdlib.UpperFunc, "toInt": stdlib.IntFunc, "toBool": ParseBoolFunc, - "split": stdlib.SplitFunc, } } @@ -323,7 +315,7 @@ func (impl *VariableTemplateParserImpl) diluteExistingHclVars(template string, t func (impl *VariableTemplateParserImpl) convertToHclExpression(template string) string { - var devtronRegexCompiledPattern = regexp.MustCompile(impl.variableTemplateParserConfig.VariableExpressionRegex) + var devtronRegexCompiledPattern = regexp.MustCompile(VariableRegex) //TODO KB: add support of Braces () also indexesData := devtronRegexCompiledPattern.FindAllIndex([]byte(template), -1) var strBuilder strings.Builder strBuilder.Grow(len(template)) diff --git a/pkg/variables/parsers/bean.go b/pkg/variables/parsers/bean.go index 3d9cc4db84..c883449286 100644 --- a/pkg/variables/parsers/bean.go +++ b/pkg/variables/parsers/bean.go @@ -24,13 +24,6 @@ type VariableParserRequest struct { IgnoreUnknownVariables bool } -func (request VariableParserRequest) GetEmptyResponse() VariableParserResponse { - return VariableParserResponse{ - Request: request, - ResolvedTemplate: request.Template, - } -} - type VariableParserResponse struct { Request VariableParserRequest ResolvedTemplate string @@ -47,23 +40,10 @@ func (request VariableParserRequest) GetValuesMap() map[string]string { return variablesMap } -func (request VariableParserRequest) GetOriginalValuesMap() map[string]interface{} { - var variableToValue = make(map[string]interface{}, 0) - for _, variable := range request.Variables { - variableToValue[variable.VariableName] = variable.VariableValue.Value - } - return variableToValue -} - -func GetScopedVarData(varData map[string]string, nameToIsSensitive map[string]bool, isSuperAdmin bool) 
[]*models.ScopedVariableData { +func GetScopedVarData(varData map[string]string) []*models.ScopedVariableData { scopedVarData := make([]*models.ScopedVariableData, 0) for key, value := range varData { - - finalValue := value - if !isSuperAdmin && nameToIsSensitive[key] { - finalValue = models.HiddenValue - } - scopedVarData = append(scopedVarData, &models.ScopedVariableData{VariableName: key, VariableValue: &models.VariableValue{Value: models.GetInterfacedValue(finalValue)}}) + scopedVarData = append(scopedVarData, &models.ScopedVariableData{VariableName: key, VariableValue: &models.VariableValue{Value: value}}) } return scopedVarData } diff --git a/pkg/variables/repository/ScopedVariableRepository.go b/pkg/variables/repository/ScopedVariableRepository.go index fc6023a48b..4615c878bf 100644 --- a/pkg/variables/repository/ScopedVariableRepository.go +++ b/pkg/variables/repository/ScopedVariableRepository.go @@ -23,8 +23,6 @@ type ScopedVariableRepository interface { // Delete DeleteVariables(auditLog sql.AuditLog, tx *pg.Tx) error - - GetVariableTypeForVariableNames(variableNames []string) ([]*VariableDefinition, error) } type ScopedVariableRepositoryImpl struct { @@ -75,20 +73,6 @@ func (impl *ScopedVariableRepositoryImpl) GetAllVariableMetadata() ([]*VariableD return variableDefinition, err } -func (impl *ScopedVariableRepositoryImpl) GetVariableTypeForVariableNames(variableNames []string) ([]*VariableDefinition, error) { - variableDefinition := make([]*VariableDefinition, 0) - err := impl. - dbConnection.Model(&variableDefinition). - Column("name", "var_type"). - Where("active = ?", true). - Where("name in (?)", pg.In(variableNames)). - Select() - if err == pg.ErrNoRows { - err = nil - } - return variableDefinition, err -} - func (impl *ScopedVariableRepositoryImpl) GetVariablesForVarIds(ids []int) ([]*VariableDefinition, error) { var variableDefinition []*VariableDefinition err := impl. 
diff --git a/releasenotes.md b/releasenotes.md index e7cc215a5c..56918bb5d4 100644 --- a/releasenotes.md +++ b/releasenotes.md @@ -1,74 +1,38 @@ -## v0.6.23 +## v0.6.22 ## Bugs -- fix: DT19-v1 bug fixes (#3962) -- fix: ci pod request correction (#3980) -- fix: pipelineOverride id being sent instead of pipelineId (#3984) -- fix: Iam role handling script for plugin pull image from CR (#3955) -- fix: Deployment Template HCL parsing with % keyword (#4012) -- fix: handled releaseNotExists case for helm type cd pipeline resource tree fetch (#4016) -- fix: auto post cd not working in case of multiple parallel gitOps pipeline (#4018) -- fix: handled error in bulk trigger deploy (#4034) -- fix: The manager(non-admin user) of the application is unable to select a list of apps when assigning permissions (#4053) -- fix: ci job handling in app create api (#4054) -- fix: Deploying currently Active image using TriggerDeploy API from devtctl tool is broken (#4056) -- fix: Unable to delete ci pipeline in case you configure multi git (#4072) -- fix: env for specific deployment (#4085) -- fix: update build configuration fix (#4093) -- fix: Artifacts filter in CD trigger view (#4064) -- fix: Bugathon DT-19 version-2 fixes (#4105) -- fix: App Labels node selector not getting attach in ci-workflow (#4084) -- fix: Update cd pipeline create empty pre post cd steps (#4113) -- fix: normal Refresh after triggering gitops deployment to avoid sync delay in argo (#4066) -- fix: helm chart delete when no rows are found (#4124) -- fix: Unable to abort pre-cd and post-cd workflow (#4121) -- fix: Helm Apps permissions do not allow Terminal or Logs view (#4110) -- fix: port service mapping (#4132) +- fix: updated adapter for cluster object (#3900) +- fix: rbac-modification for cluster list (#3767) +- fix: Helm app deployment history page breaking due to user details not found (#3873) +- fix: ci pip status query optmization (#3877) +- fix: migration script for virtual cluster v3 (#3870) +- fix: 
cloning app cmcs global boolean value fix (#3862) +- fix: Makefile correction (#3852) +- fix: deleting pipeline stage and related data if no stage steps are found (#3832) +- fix: Port number fix in helm app (#3843) +- fix: External cm and secret in jobs not getting added as env variable in container (#3815) +- fix: pre-cd pod not getting scheduled when node affinity is not present in external cluster. (#3806) +- fix: k8s permission and chart-group permission not getting deleted from orchestrator (#3824) +- fix: added missing audit logs while deleting cd pipeline (#3822) ## Enhancements -- feat: Helm async install (#3856) -- feat: handle CI success event auto trigger in batch (#3951) -- feat: added env variable to skip gitops validation on create/update (#3956) -- feat: added flag to configure ecr repo creation (#3963) -- feat: Ability to change branch for all selected applications during bulk build from Application Groups (#3955) -- feat: Variables support in pre-post CI, CD and Jobs (#3911) -- feat: Poll Images from ECR Container Repository Plugin (#3971) -- feat: resource groups CRUD and environment filtering (#3974) -- feat: Scoped variables primitive handling (#4033) -- feat: adding DEVTRON_APP_NAME system variable for deployment template (#4041) -- feat: wf pod restart (#3892) -- feat: added deduction for system variables (#4075) -- feat: manifest comparision (#3844) -- feat: multiple images handling for single workflow for ECR Plugin Poll Images (#4027) -- feat: Jenkins plugin migration (#4039) -- feat: clone cd pipelines while cloning app across project (#4087) +- feat: added new statefulset-5-0-0 chart in reference chart (#3909) +- feat: added configurable provenance flag for buildx builds (#3905) +- feat: deployment history release not found err handling (#3811) +- feat: added validation for create app workflow API (#3842) +- feat: custom chart download (#3801) +- feat: Virtual cluster v3 (#3764) +- feat: Maintaining audit logs (#3763) +- feat: Capability 
to block deployments in case of vulnerabilities only if FIXED IN VERSION available (#3796) ## Documentation -- doc: Glossary of jargonish terms for layman in the context of Devtron (#3820) -- docs: Ephemeral Container Doc (#3912) -- docs: New Image Alignment in Ephemeral doc (#3959) -- docs: Snapshot updation in PVC docs + PreBuild CI-CD (#3964) -- doc: Fixed issuer url in okta docs (#4062) -- docs: Config Approval Draft (#3981) -- docs: Modified Existing Container Registry Doc (#4048) -- docs: Added OCI Pull in Usecases (#4112) +- doc: draft version of Graviton benchmark (#3890) +- doc: Okta SSO Configuration Doc (#3876) ## Others -- chore: added workflow to escalate pager-duty issue (#3927) -- chore: changed loop from for to while (#3928) -- chore: scheduled escalate pager duty issue workflow (#3933) -- chore: added log config for dev mode (#3953) -- chore: minor correction in devtron reference charts (#3957) -- chore: workflow refactoring (#3714) -- chore: pr-issue-validator permissions fix (#3967) -- chore: added CODEOWNERS (#3966) -- chore: Scoped variable refactoring (#3977) -- chore: modified labels of keda autoscale object in deployment chart (#3999) -- chore: Update pr-issue-validator.yaml (#3854) -- chore: refactoring around PipelineBuilder (#4043) -- chore: moved k8s library to common-lib and added scripts for adding sshTunnel config to clusters (#3848) -- chore: Add pager-duty issue template (#3988) -- chore: first cut refactor ci-pipeline (#4091) -- chore: refactored appartifact manager and cimaterialconfigservice (#4096) -- chore: Remove the EnvVariablesFromFieldPath from values.yaml in refcharts (#4111) -- chore: Updated schema for Scope Variable (#4079) -- chore: skip validation for release PRs (#4128) +- chore: changes for migration no conflict (#3919) +- chore: Changed in Docker file for SQL file (#3904) +- chore: adjust duplicate action threshold (#3879) +- chore: find potential-duplicate issues (#3858) +- chore: Update pr-issue-validator.yaml 
(#3849) + + diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-2-0/schema.json b/scripts/devtron-reference-helm-charts/cronjob-chart_1-2-0/schema.json index 0a29c23603..5761da51aa 100644 --- a/scripts/devtron-reference-helm-charts/cronjob-chart_1-2-0/schema.json +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-2-0/schema.json @@ -11,11 +11,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,38 +26,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -75,11 +55,7 @@ "title": "Environment Variables" }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -100,11 +76,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines 
the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -126,29 +98,17 @@ } }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -158,58 +118,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -230,11 +166,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -256,29 +188,17 @@ } }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": [ 
- "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -288,29 +208,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -356,11 +264,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -388,47 +292,27 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -446,11 +330,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -478,11 +358,7 @@ "enum": ["Forbid","Allow"] }, "failedJobsHistoryLimit": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "", "title": "Failed Job History Limit" }, @@ -498,29 +374,17 @@ "title": "Schedule" }, "startingDeadlineSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "if a CronJob controller cannot start a job run on its schedule, it will keep retrying until this value is reached", "title": "Starting Deadline Seconds" }, "successfulJobsHistoryLimit": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "specifies how many completed and failed jobs should be kept", "title": "Successful Jobs History Limit" }, "suspend": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to suspend subsequent executions, does not apply on already started executions", "title": "Suspend", "default": false @@ -533,11 +397,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -650,11 +510,7 @@ } }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -703,11 +559,7 @@ "title": "Annotations" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -765,70 +617,42 @@ "title": "Job Configs", "properties": { "activeDeadlineSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "once a Job reaches activeDeadlineSeconds, all of its running Pods are terminated", "title": "Active Deadline Seconds" }, "backoffLimit": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "number of retries after which a job is failed", "title": "BackOff Limit", "default": 6 }, "completions": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "used for getting fixed completion count Job", "title": "Completions" }, "parallelism": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "used to run aKubernetes Job with multiple parallel worker processes in a given pod", "title": "Parallelism", "default": 1 }, "suspend": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to suspend subsequent executions, does not apply on already started executions", "title": "Suspend", "default": false }, "ttlSecondsAfterFinished": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "feature used for clean up of finished Jobs (Complete or Failed)", "title": "TTL Seconds After Finished" } } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -860,11 +684,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1000,11 +820,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/schema.json b/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/schema.json index c50940d861..61a2b36a7b 100644 --- a/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/schema.json +++ b/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/schema.json @@ -16,11 +16,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -35,38 +31,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": 
"port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -102,11 +82,7 @@ ] }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -127,11 +103,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -142,29 +114,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -174,58 +134,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -246,11 +182,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -261,29 +193,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -293,29 +213,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -361,11 +269,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -393,38 +297,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -434,11 +322,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -466,11 +350,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", 
+ "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -503,11 +383,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -662,11 +538,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -726,11 +598,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -851,11 +719,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -913,11 +777,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1079,11 +939,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/values.yaml index be1cc73050..31e7c1360e 100644 --- a/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/values.yaml +++ 
b/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/values.yaml @@ -126,9 +126,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: [] -# - name: POD_NAME -# fieldPath: metadata.name +EnvVariablesFromFieldPath: +- name: POD_NAME + fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/schema.json b/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/schema.json index 7cb1b96315..3b8b371c5b 100644 --- a/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/schema.json +++ b/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/schema.json @@ -16,11 +16,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -35,47 +31,27 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "nodeport of the corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can 
accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -111,11 +87,7 @@ ] }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -136,11 +108,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -151,29 +119,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -183,58 +139,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -255,11 +187,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -270,29 +198,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -302,29 +218,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -380,20 +284,12 @@ "title": "CORS" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -439,11 +335,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -471,38 +363,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": 
"Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -512,11 +388,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -544,11 +416,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -581,11 +449,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -740,11 +604,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -804,11 +664,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", 
"title": "Enabled" }, @@ -929,11 +785,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -991,11 +843,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1157,11 +1005,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/values.yaml index d934a0abbc..d8e740b5e5 100644 --- a/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/values.yaml @@ -280,9 +280,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: [] -# - name: POD_NAME -# fieldPath: metadata.name +EnvVariablesFromFieldPath: +- name: POD_NAME + fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-18-0/schema.json b/scripts/devtron-reference-helm-charts/deployment-chart_4-18-0/schema.json index 6a332631a9..c58f0bdf97 100644 --- a/scripts/devtron-reference-helm-charts/deployment-chart_4-18-0/schema.json +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-18-0/schema.json @@ -16,11 +16,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -35,47 +31,27 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "nodeport of the corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -165,11 +141,7 @@ ] }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -190,11 +162,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -205,29 +173,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", 
+ "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -237,58 +193,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -309,11 +241,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -324,29 +252,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -356,29 +272,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -489,20 +393,12 @@ "title": "CORS" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -548,11 +444,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -580,38 +472,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": 
"Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -621,11 +497,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -653,11 +525,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -690,11 +558,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -853,11 +717,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -928,11 +788,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" 
}, @@ -992,11 +848,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -1117,11 +969,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -1146,17 +994,7 @@ } } }, - "deploymentLabels": { - "type": "object", - "description": "deploymentLabels is an object to define the label on deployment.", - "title": "DeploymentLabels" - }, - "deploymentAnnotations": { - "type": "object", - "description": "deploymentAnnotations is an object to define the annotations on deployment.", - "title": "DeploymentAnnotations" - }, - "podExtraSpecs":{ + "podExtraSpecs":{ "type": "object", "description": "ExtraSpec for the pods to be configured", "title": "podExtraSpecs" @@ -1189,11 +1027,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1355,11 +1189,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_3-10-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_3-10-0/schema.json index 026d009169..070eb2c8d8 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_3-10-0/schema.json +++ 
b/scripts/devtron-reference-helm-charts/reference-chart_3-10-0/schema.json @@ -11,11 +11,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,38 +26,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -75,11 +55,7 @@ "title": "Environment Variables" }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -100,11 +76,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -126,29 +98,17 @@ } }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": 
"integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -158,58 +118,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -230,11 +166,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -256,29 +188,17 @@ } }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": [ 
- "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -288,29 +208,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -356,11 +264,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -388,47 +292,27 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -456,11 +340,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -482,11 +362,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -603,11 +479,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -639,11 +511,7 @@ "title": "Annotations" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -662,11 +530,7 @@ "title": "Init Containers" }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -688,11 +552,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + 
"type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -828,11 +688,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_3-11-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_3-11-0/schema.json index 6dcd0150b5..875b922249 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_3-11-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_3-11-0/schema.json @@ -11,11 +11,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,38 +26,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -75,11 +55,7 @@ "title": "Environment Variables" }, "GracePeriod": { 
- "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -100,11 +76,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -126,29 +98,17 @@ } }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -158,58 +118,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -230,11 +166,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -256,29 +188,17 @@ } }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": [ 
- "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -288,29 +208,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -356,11 +264,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -388,47 +292,27 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -456,11 +340,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -482,11 +362,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -605,11 +481,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -658,11 +530,7 @@ "title": "Annotations" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -708,11 +576,7 @@ "title": "Init Containers" }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -744,11 +608,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + 
"type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -884,11 +744,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_3-12-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_3-12-0/schema.json index 37e7a2a7b7..6dd78d8cb7 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_3-12-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_3-12-0/schema.json @@ -11,11 +11,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,38 +26,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -75,11 +55,7 @@ "title": "Environment Variables" }, "GracePeriod": { 
- "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -100,11 +76,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -126,29 +98,17 @@ } }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -158,58 +118,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -230,11 +166,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -256,29 +188,17 @@ } }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": [ 
- "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -288,29 +208,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -356,11 +264,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -388,47 +292,27 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -456,11 +340,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -482,11 +362,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -605,11 +481,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -658,11 +530,7 @@ "title": "Annotations" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -708,11 +576,7 @@ "title": "Init Containers" }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -744,11 +608,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + 
"type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -884,11 +744,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_3-13-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_3-13-0/schema.json index 83959192f6..07558526be 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_3-13-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_3-13-0/schema.json @@ -11,11 +11,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,38 +26,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -75,11 +55,7 @@ "title": "Environment Variables" }, "GracePeriod": { 
- "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -100,11 +76,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -126,29 +98,17 @@ } }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -158,58 +118,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -230,11 +166,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -256,29 +188,17 @@ } }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": [ 
- "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -288,29 +208,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -356,11 +264,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -388,47 +292,27 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -456,11 +340,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -482,11 +362,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -605,11 +481,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -658,11 +530,7 @@ "title": "Annotations" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -708,11 +576,7 @@ "title": "Init Containers" }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -744,11 +608,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + 
"type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -884,11 +744,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_3-9-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_3-9-0/schema.json index b36b76fedc..d9a6d5bd2b 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_3-9-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_3-9-0/schema.json @@ -11,11 +11,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,38 +26,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -75,11 +55,7 @@ "title": "Environment Variables" }, "GracePeriod": { - 
"type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -100,11 +76,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -126,29 +98,17 @@ } }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -158,58 +118,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -230,11 +166,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -256,29 +188,17 @@ } }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": [ 
- "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -288,29 +208,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -356,11 +264,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -388,47 +292,27 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -456,11 +340,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -482,11 +362,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -603,11 +479,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -639,11 +511,7 @@ "title": "Annotations" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -662,11 +530,7 @@ "title": "Init Containers" }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -688,11 +552,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + 
"type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -828,11 +688,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-10-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-10-0/schema.json index df09be02c8..aa2dc38e2b 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-10-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-10-0/schema.json @@ -11,11 +11,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,38 +26,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -75,11 +55,7 @@ "title": "Environment Variables" }, "GracePeriod": { 
- "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -100,11 +76,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -115,29 +87,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -147,58 +107,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -219,11 +155,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -234,29 +166,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -266,29 +186,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -334,11 +242,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -366,38 +270,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -407,11 +295,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -439,11 +323,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -471,11 +351,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -584,11 +460,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -648,11 +520,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -703,11 +571,7 @@ "title": "Init Containers" }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -739,11 +603,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -879,11 +739,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json index 2c65f79d3d..fcc54107ed 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json @@ -11,11 +11,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,38 +26,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -75,11 +55,7 @@ "title": 
"Environment Variables" }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -100,11 +76,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -115,29 +87,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -147,58 +107,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -219,11 +155,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -234,29 +166,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -266,29 +186,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -334,11 +242,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -366,38 +270,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -407,11 +295,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -439,11 +323,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -476,11 +356,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -589,11 +465,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -653,11 +525,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -751,11 +619,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -792,11 +656,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -938,11 +798,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-12-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-12-0/schema.json index 14c0d9645d..01711de132 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-12-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-12-0/schema.json @@ -11,11 +11,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,38 +26,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -75,11 +55,7 @@ "title": 
"Environment Variables" }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -100,11 +76,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -115,29 +87,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -147,58 +107,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -219,11 +155,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -234,29 +166,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -266,29 +186,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -334,11 +242,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -366,38 +270,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -407,11 +295,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -439,11 +323,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -476,11 +356,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -589,11 +465,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -653,11 +525,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -751,11 +619,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -792,11 +656,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -958,11 +818,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-13-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-13-0/schema.json index dafb60372d..48585478f7 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-13-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-13-0/schema.json @@ -11,11 +11,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,38 +26,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -75,11 +55,7 @@ "title": 
"Environment Variables" }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -100,11 +76,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -115,29 +87,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -147,58 +107,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -219,11 +155,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -234,29 +166,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -266,29 +186,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -334,11 +242,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -392,38 +296,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -433,11 +321,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -465,11 +349,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -502,11 +382,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -615,11 +491,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -679,11 +551,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -777,11 +645,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -818,11 +682,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -984,11 +844,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/schema.json index 81591200f5..2beea7006c 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/schema.json @@ -11,11 +11,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,38 +26,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -75,11 +55,7 @@ "title": 
"Environment Variables" }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -100,11 +76,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -115,29 +87,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -147,58 +107,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -219,11 +155,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -234,29 +166,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -266,29 +186,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -334,11 +242,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -392,38 +296,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -433,11 +321,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -465,11 +349,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -502,11 +382,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -615,11 +491,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -679,11 +551,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -777,11 +645,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -818,11 +682,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -984,11 +844,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/values.yaml index 67e33f31eb..4ef8a04163 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/values.yaml @@ -125,9 +125,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: [] -# - name: POD_NAME -# fieldPath: metadata.name +EnvVariablesFromFieldPath: +- name: POD_NAME + fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/schema.json index 081e95cb05..d4c636b588 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/schema.json @@ -16,11 +16,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -35,38 +31,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": 
"port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -102,11 +82,7 @@ ] }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -127,11 +103,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -142,29 +114,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -174,58 +134,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -246,11 +182,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -261,29 +193,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -293,29 +213,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -361,11 +269,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -393,38 +297,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -434,11 +322,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -466,11 +350,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", 
+ "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -503,11 +383,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -662,11 +538,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -726,11 +598,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -851,11 +719,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -913,11 +777,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1079,11 +939,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/values.yaml index 2a8b11d47c..28c9563974 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/values.yaml +++ 
b/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/values.yaml @@ -125,9 +125,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: [] -# - name: POD_NAME -# fieldPath: metadata.name +EnvVariablesFromFieldPath: +- name: POD_NAME + fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/schema.json index 081e95cb05..d4c636b588 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/schema.json @@ -16,11 +16,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -35,38 +31,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -102,11 +82,7 @@ ] }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": 
"time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -127,11 +103,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -142,29 +114,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -174,58 +134,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -246,11 +182,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -261,29 +193,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -293,29 +213,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -361,11 +269,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -393,38 +297,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -434,11 +322,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -466,11 +350,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -503,11 +383,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -662,11 +538,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -726,11 +598,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -851,11 +719,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -913,11 +777,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1079,11 +939,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/values.yaml index 2a8b11d47c..28c9563974 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/values.yaml @@ -125,9 +125,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: [] -# - name: POD_NAME -# fieldPath: metadata.name +EnvVariablesFromFieldPath: +- name: POD_NAME + fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/schema.json index 214a2baf6b..ef3cb091de 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/schema.json @@ -16,11 +16,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -35,47 +31,27 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": 
"port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "nodeport of the corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -111,11 +87,7 @@ ] }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -136,11 +108,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -151,29 +119,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + 
"type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -183,58 +139,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -255,11 +187,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given 
container is not considered as ready", "title": "Failure Threshold" }, @@ -270,29 +198,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -302,29 +218,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -380,20 +284,12 @@ "title": "CORS" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -439,11 +335,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -471,38 +363,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is 
expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -512,11 +388,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -544,11 +416,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -581,11 +449,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -740,11 +604,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -804,11 +664,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -929,11 +785,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -991,11 +843,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1157,11 +1005,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/values.yaml index d8f2cd026f..5f31067eb3 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/values.yaml @@ -125,9 +125,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: [] -# - name: POD_NAME -# fieldPath: metadata.name +EnvVariablesFromFieldPath: +- name: POD_NAME + fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-18-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-18-0/schema.json index da5cce59ea..2b9778bab0 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-18-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-18-0/schema.json @@ -15,11 +15,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -34,47 +30,27 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "nodeport of the corresponding 
kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -164,11 +140,7 @@ ] }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -189,11 +161,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -204,29 +172,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -236,58 +192,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + 
"type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -308,11 +240,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -323,29 +251,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": 
"integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -355,29 +271,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -433,20 +337,12 @@ "title": "CORS" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -492,11 +388,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -524,38 +416,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is 
expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -565,11 +441,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -597,11 +469,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -634,11 +502,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -800,11 +664,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -875,11 +735,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -939,11 +795,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -1114,11 +966,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -1176,11 +1024,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1286,11 +1130,7 @@ ] }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable service", "title": "Enabled" } @@ -1351,11 +1191,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-0-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_5-0-0/schema.json index 2a43e937cd..2b9778bab0 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_5-0-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-0-0/schema.json @@ -15,11 +15,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -34,47 +30,27 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "nodeport of the corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -164,11 +140,7 @@ ] }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -189,11 +161,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -204,29 +172,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -236,58 +192,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the 
liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -308,11 +240,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -323,29 +251,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial 
Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -355,29 +271,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -433,20 +337,12 @@ "title": "CORS" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -492,11 +388,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -524,38 
+416,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -565,11 +441,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -597,11 +469,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -634,11 +502,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -800,11 +664,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", 
"title": "Enabled" }, @@ -875,11 +735,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -939,11 +795,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -1114,11 +966,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -1176,11 +1024,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1286,11 +1130,7 @@ ] }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable service", "title": "Enabled" } @@ -1351,11 +1191,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/schema.json b/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/schema.json index da40c1753a..25935a51ef 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/schema.json +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/schema.json @@ -16,11 +16,7 @@ "type": 
"object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -35,47 +31,27 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "nodeport of the corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -108,11 +84,7 @@ "description": "used to provide mounts to the volume" }, "revisionHistoryLimit":{ - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "title": "RevisionHistoryLimit", "description": "revisionHistoryLimit is the maximum number of revisions that will bemaintained in the StatefulSet's revision history." 
}, @@ -409,11 +381,7 @@ ] }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -434,11 +402,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -449,29 +413,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -481,58 +433,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -553,11 +481,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -568,29 +492,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -600,29 +512,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -678,20 +578,12 @@ "title": "CORS" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -737,11 +629,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -769,38 +657,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": 
"Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -810,11 +682,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -842,11 +710,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -879,11 +743,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -1032,11 +892,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -1107,11 +963,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": 
"Enabled" }, @@ -1171,11 +1023,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -1296,11 +1144,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -1358,11 +1202,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1524,11 +1364,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/values.yaml index b58cc8d715..a0eaca9949 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/values.yaml @@ -137,9 +137,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: [] -# - name: POD_NAME -# fieldPath: metadata.name +EnvVariablesFromFieldPath: +- name: POD_NAME + fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/schema.json b/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/schema.json index 672df0a42e..25935a51ef 100644 --- 
a/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/schema.json +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/schema.json @@ -16,11 +16,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -35,47 +31,27 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "nodeport of the corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -108,11 +84,7 @@ "description": "used to provide mounts to the volume" }, "revisionHistoryLimit":{ - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "title": "RevisionHistoryLimit", "description": "revisionHistoryLimit is the maximum number of revisions that will bemaintained in the StatefulSet's revision history." 
}, @@ -409,11 +381,7 @@ ] }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -434,11 +402,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -449,29 +413,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -481,58 +433,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -553,11 +481,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -568,26 +492,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": [ - "integer", - "string" ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -597,27 +512,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -673,20 +578,12 @@ "title": "CORS" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -732,11 +629,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -764,37 +657,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": 
"Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -804,11 +682,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -836,11 +710,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -873,11 +743,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -1026,11 +892,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -1101,11 +963,7 @@ "default": "" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" 
}, @@ -1165,11 +1023,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -1290,11 +1144,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -1352,11 +1202,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1518,10 +1364,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/values.yaml index 4a252b23e1..4fe896feee 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/values.yaml @@ -140,9 +140,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: [] -# - name: POD_NAME -# fieldPath: metadata.name +EnvVariablesFromFieldPath: +- name: POD_NAME + fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/workflow-chart_1-0-0/schema.json b/scripts/devtron-reference-helm-charts/workflow-chart_1-0-0/schema.json index 6d970c183b..2e31a72972 100644 --- 
a/scripts/devtron-reference-helm-charts/workflow-chart_1-0-0/schema.json +++ b/scripts/devtron-reference-helm-charts/workflow-chart_1-0-0/schema.json @@ -11,11 +11,7 @@ "type": "object", "properties": { "envoyPort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,38 +26,22 @@ "title": "Name" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -75,11 +55,7 @@ "title": "Environment Variables" }, "GracePeriod": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -100,11 +76,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -115,29 +87,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": 
{ - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -147,58 +107,34 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -219,11 +155,7 @@ "title": "Command" }, "failureThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -234,29 +166,17 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "port to access on the container", "title": "Port" }, @@ -266,29 +186,17 @@ "title": "Scheme" }, "successThreshold": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -334,11 +242,7 @@ "title": "Arguments", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -366,38 +270,22 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -407,11 +295,7 @@ "title": "Behavior" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -429,11 +313,7 @@ "title": "Command", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling commands" }, "value": { @@ -460,11 +340,7 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -567,11 +443,7 @@ "default": "nginx" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -631,11 +503,7 @@ "default": "nginx-internal" }, "enabled": { - "type": [ - "boolean", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "boolean", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -729,11 +597,7 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -770,11 +634,7 @@ "title": "Raw YAML" }, "replicaCount": { - "type": [ - "integer", - "string" - ], - "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -916,11 +776,7 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": [ - "integer", - "string" - ], - "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "type": "integer", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/sql/182_custom_image_tag.down.sql b/scripts/sql/177_custom_image_tag.down.sql similarity index 100% rename from scripts/sql/182_custom_image_tag.down.sql rename to scripts/sql/177_custom_image_tag.down.sql diff --git a/scripts/sql/182_custom_image_tag.up.sql b/scripts/sql/177_custom_image_tag.up.sql similarity index 100% rename from scripts/sql/182_custom_image_tag.up.sql rename to scripts/sql/177_custom_image_tag.up.sql diff --git a/scripts/sql/181_remove_index_image_scan_deploy_info.down.sql b/scripts/sql/181_remove_index_image_scan_deploy_info.down.sql deleted file mode 100644 index 2c9791ab66..0000000000 --- a/scripts/sql/181_remove_index_image_scan_deploy_info.down.sql +++ /dev/null @@ -1,2 +0,0 @@ -DROP index image_scan_deploy_info_unique; -CREATE UNIQUE INDEX image_scan_deploy_info_unique ON public.image_scan_deploy_info USING btree (scan_object_meta_id, object_type); diff --git a/scripts/sql/181_remove_index_image_scan_deploy_info.up.sql b/scripts/sql/181_remove_index_image_scan_deploy_info.up.sql deleted file mode 100644 index d57d541bc3..0000000000 --- a/scripts/sql/181_remove_index_image_scan_deploy_info.up.sql +++ /dev/null @@ -1,2 +0,0 @@ -DROP index image_scan_deploy_info_unique; -CREATE INDEX image_scan_deploy_info_unique ON public.image_scan_deploy_info USING btree (scan_object_meta_id, object_type); diff --git a/util/argo/ArgoUserService.go b/util/argo/ArgoUserService.go index 7536b9132d..3e7e24b65e 100644 --- a/util/argo/ArgoUserService.go +++ b/util/argo/ArgoUserService.go @@ -7,7 +7,6 @@ 
import ( "github.com/devtron-labs/authenticator/client" "github.com/devtron-labs/common-lib/utils/k8s" "github.com/devtron-labs/devtron/client/argocdServer" - "github.com/devtron-labs/devtron/client/argocdServer/connection" "github.com/devtron-labs/devtron/client/argocdServer/session" "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/pkg/cluster" @@ -49,12 +48,12 @@ type ArgoUserServiceImpl struct { devtronSecretConfig *util2.DevtronSecretConfig runTimeConfig *client.RuntimeConfig gitOpsRepository repository.GitOpsConfigRepository - argoCDConnectionManager connection.ArgoCDConnectionManager + argoCDConnectionManager argocdServer.ArgoCDConnectionManager versionService argocdServer.VersionService k8sUtil *k8s.K8sUtil } -func NewArgoUserServiceImpl(Logger *zap.SugaredLogger, clusterService cluster.ClusterService, devtronSecretConfig *util2.DevtronSecretConfig, runTimeConfig *client.RuntimeConfig, gitOpsRepository repository.GitOpsConfigRepository, argoCDConnectionManager connection.ArgoCDConnectionManager, versionService argocdServer.VersionService, k8sUtil *k8s.K8sUtil) (*ArgoUserServiceImpl, error) { +func NewArgoUserServiceImpl(Logger *zap.SugaredLogger, clusterService cluster.ClusterService, devtronSecretConfig *util2.DevtronSecretConfig, runTimeConfig *client.RuntimeConfig, gitOpsRepository repository.GitOpsConfigRepository, argoCDConnectionManager argocdServer.ArgoCDConnectionManager, versionService argocdServer.VersionService, k8sUtil *k8s.K8sUtil) (*ArgoUserServiceImpl, error) { argoUserServiceImpl := &ArgoUserServiceImpl{ logger: Logger, clusterService: clusterService, diff --git a/util/context-utils.go b/util/context-utils.go deleted file mode 100644 index c28b588f15..0000000000 --- a/util/context-utils.go +++ /dev/null @@ -1,23 +0,0 @@ -package util - -import ( - "context" - "fmt" - "reflect" -) - 
-const IsSuperAdminFlag = "isSuperAdmin" - -func SetSuperAdminInContext(ctx context.Context, isSuperAdmin bool) context.Context { - ctx = context.WithValue(ctx, IsSuperAdminFlag, isSuperAdmin) - return ctx -} - -func GetIsSuperAdminFromContext(ctx context.Context) (bool, error) { - flag := ctx.Value(IsSuperAdminFlag) - - if flag != nil && reflect.TypeOf(flag).Kind() == reflect.Bool { - return flag.(bool), nil - } - return false, fmt.Errorf("context not valid, isSuperAdmin flag not set correctly %v", flag) -} diff --git a/util/rbac/EnforcerUtil.go b/util/rbac/EnforcerUtil.go index 693ad2eb28..b1183d2862 100644 --- a/util/rbac/EnforcerUtil.go +++ b/util/rbac/EnforcerUtil.go @@ -30,6 +30,7 @@ import ( "github.com/devtron-labs/devtron/pkg/user/casbin" "github.com/go-pg/pg" "go.uber.org/zap" + "strings" ) type EnforcerUtil interface { @@ -114,7 +115,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsByEnvIdsAndAppId(envIds []int, appId for _, env := range envs { if _, ok := objects[env.Id]; !ok { - objects[env.Id] = fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, appName) + objects[env.Id] = fmt.Sprintf("%s/%s", strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(appName)) envObjectToName[objects[env.Id]] = env.Name } } @@ -130,7 +131,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsByAppIds(appIds []int) map[int]string } for _, item := range result { if _, ok := objects[item.Id]; !ok { - objects[item.Id] = fmt.Sprintf("%s/%s", item.Team.Name, item.AppName) + objects[item.Id] = fmt.Sprintf("%s/%s", strings.ToLower(item.Team.Name), strings.ToLower(item.AppName)) } } return objects @@ -139,17 +140,17 @@ func (impl EnforcerUtilImpl) GetRbacObjectsByAppIds(appIds []int) map[int]string func (impl EnforcerUtilImpl) GetAppRBACName(appName string) string { application, err := impl.appRepo.FindAppAndProjectByAppName(appName) if err != nil { - return fmt.Sprintf("%s/%s", "", appName) + return fmt.Sprintf("%s/%s", "", strings.ToLower(appName)) 
} - return fmt.Sprintf("%s/%s", application.Team.Name, appName) + return fmt.Sprintf("%s/%s", strings.ToLower(application.Team.Name), strings.ToLower(appName)) } func (impl EnforcerUtilImpl) GetProjectAdminRBACNameBYAppName(appName string) string { application, err := impl.appRepo.FindAppAndProjectByAppName(appName) if err != nil { - return fmt.Sprintf("%s/%s", "", appName) + return fmt.Sprintf("%s/%s", "", strings.ToLower(appName)) } - return fmt.Sprintf("%s/%s", application.Team.Name, "*") + return fmt.Sprintf("%s/%s", strings.ToLower(application.Team.Name), "*") } func (impl EnforcerUtilImpl) GetRbacObjectsForAllApps() map[int]string { @@ -160,7 +161,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsForAllApps() map[int]string { } for _, item := range result { if _, ok := objects[item.Id]; !ok { - objects[item.Id] = fmt.Sprintf("%s/%s", item.Team.Name, item.AppName) + objects[item.Id] = fmt.Sprintf("%s/%s", strings.ToLower(item.Team.Name), strings.ToLower(item.AppName)) } } return objects @@ -174,7 +175,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsForAllAppsWithTeamID(teamID int) map[ } for _, item := range result { if _, ok := objects[item.Id]; !ok { - objects[item.Id] = fmt.Sprintf("%s/%s", item.Team.Name, item.AppName) + objects[item.Id] = fmt.Sprintf("%s/%s", strings.ToLower(item.Team.Name), strings.ToLower(item.AppName)) } } return objects @@ -185,15 +186,15 @@ func (impl EnforcerUtilImpl) GetAppRBACNameByAppId(appId int) string { if err != nil { return fmt.Sprintf("%s/%s", "", "") } - return fmt.Sprintf("%s/%s", application.Team.Name, application.AppName) + return fmt.Sprintf("%s/%s", strings.ToLower(application.Team.Name), strings.ToLower(application.AppName)) } func (impl EnforcerUtilImpl) GetAppRBACByAppNameAndEnvId(appName string, envId int) string { env, err := impl.environmentRepository.FindById(envId) if err != nil { - return fmt.Sprintf("%s/%s", "", appName) + return fmt.Sprintf("%s/%s", "", strings.ToLower(appName)) } - return 
fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, appName) + return fmt.Sprintf("%s/%s", strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(appName)) } func (impl EnforcerUtilImpl) GetAppRBACByAppIdAndPipelineId(appId int, pipelineId int) string { @@ -203,13 +204,13 @@ func (impl EnforcerUtilImpl) GetAppRBACByAppIdAndPipelineId(appId int, pipelineI } pipeline, err := impl.pipelineRepository.FindById(pipelineId) if err != nil { - return fmt.Sprintf("%s/%s", "", application.AppName) + return fmt.Sprintf("%s/%s", "", strings.ToLower(application.AppName)) } env, err := impl.environmentRepository.FindById(pipeline.EnvironmentId) if err != nil { - return fmt.Sprintf("%s/%s", "", application.AppName) + return fmt.Sprintf("%s/%s", "", strings.ToLower(application.AppName)) } - return fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, application.AppName) + return fmt.Sprintf("%s/%s", strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(application.AppName)) } func (impl EnforcerUtilImpl) GetEnvRBACNameByAppId(appId int, envId int) string { @@ -220,9 +221,9 @@ func (impl EnforcerUtilImpl) GetEnvRBACNameByAppId(appId int, envId int) string var appName = application.AppName env, err := impl.environmentRepository.FindById(envId) if err != nil { - return fmt.Sprintf("%s/%s", "", appName) + return fmt.Sprintf("%s/%s", "", strings.ToLower(appName)) } - return fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, appName) + return fmt.Sprintf("%s/%s", strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(appName)) } func (impl EnforcerUtilImpl) GetTeamEnvRBACNameByAppId(appId int, envId int) string { @@ -234,9 +235,9 @@ func (impl EnforcerUtilImpl) GetTeamEnvRBACNameByAppId(appId int, envId int) str var teamName = application.Team.Name env, err := impl.environmentRepository.FindById(envId) if err != nil { - return fmt.Sprintf("%s/%s/%s", teamName, "", appName) + return fmt.Sprintf("%s/%s/%s", strings.ToLower(teamName), "", strings.ToLower(appName)) } - return 
fmt.Sprintf("%s/%s/%s", teamName, env.EnvironmentIdentifier, appName) + return fmt.Sprintf("%s/%s/%s", strings.ToLower(teamName), strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(appName)) } func (impl EnforcerUtilImpl) GetTeamRBACByCiPipelineId(pipelineId int) string { @@ -275,9 +276,9 @@ func (impl EnforcerUtilImpl) GetEnvRBACNameByCiPipelineIdAndEnvId(ciPipelineId i appName := application.AppName env, err := impl.environmentRepository.FindById(envId) if err != nil { - return fmt.Sprintf("%s/%s", "", appName) + return fmt.Sprintf("%s/%s", "", strings.ToLower(appName)) } - return fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, appName) + return fmt.Sprintf("%s/%s", strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(appName)) } func (impl EnforcerUtilImpl) GetEnvRBACNameByCdPipelineIdAndEnvId(cdPipelineId int) string { @@ -286,7 +287,7 @@ func (impl EnforcerUtilImpl) GetEnvRBACNameByCdPipelineIdAndEnvId(cdPipelineId i impl.logger.Error(err) return fmt.Sprintf("%s/%s", "", "") } - return fmt.Sprintf("%s/%s", pipeline.Environment.EnvironmentIdentifier, pipeline.App.AppName) + return fmt.Sprintf("%s/%s", strings.ToLower(pipeline.Environment.EnvironmentIdentifier), strings.ToLower(pipeline.App.AppName)) } func (impl EnforcerUtilImpl) GetTeamRbacObjectByCiPipelineId(ciPipelineId int) string { @@ -298,7 +299,7 @@ func (impl EnforcerUtilImpl) GetTeamRbacObjectByCiPipelineId(ciPipelineId int) s if err != nil { return fmt.Sprintf("%s/%s", "", "") } - return fmt.Sprintf("%s/%s", application.Team.Name, ciPipeline.App.AppName) + return fmt.Sprintf("%s/%s", strings.ToLower(application.Team.Name), strings.ToLower(ciPipeline.App.AppName)) } func (impl EnforcerUtilImpl) GetTeamAndEnvironmentRbacObjectByCDPipelineId(pipelineId int) (string, string) { @@ -312,8 +313,8 @@ func (impl EnforcerUtilImpl) GetTeamAndEnvironmentRbacObjectByCDPipelineId(pipel impl.logger.Errorw("error on fetching data for rbac object", "err", err) return "", "" } - teamRbac := 
fmt.Sprintf("%s/%s", application.Team.Name, pipeline.App.AppName) - envRbac := fmt.Sprintf("%s/%s", pipeline.Environment.EnvironmentIdentifier, pipeline.App.AppName) + teamRbac := fmt.Sprintf("%s/%s", strings.ToLower(application.Team.Name), strings.ToLower(pipeline.App.AppName)) + envRbac := fmt.Sprintf("%s/%s", strings.ToLower(pipeline.Environment.EnvironmentIdentifier), strings.ToLower(pipeline.App.AppName)) return teamRbac, envRbac } @@ -327,7 +328,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsForAllAppsAndEnvironments() (map[int] } for _, item := range apps { if _, ok := appObjects[item.Id]; !ok { - appObjects[item.Id] = fmt.Sprintf("%s/%s", item.Team.Name, item.AppName) + appObjects[item.Id] = fmt.Sprintf("%s/%s", strings.ToLower(item.Team.Name), strings.ToLower(item.AppName)) } } @@ -340,7 +341,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsForAllAppsAndEnvironments() (map[int] for _, app := range apps { key := fmt.Sprintf("%d-%d", env.Id, app.Id) if _, ok := envObjects[key]; !ok { - envObjects[key] = fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, app.AppName) + envObjects[key] = fmt.Sprintf("%s/%s", strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(app.AppName)) } } } @@ -380,11 +381,11 @@ func (impl EnforcerUtilImpl) GetHelmObject(appId int, envId int) (string, string }*/ if environmentIdentifier2 == "" { - return fmt.Sprintf("%s/%s/%s", application.Team.Name, environmentIdentifier, application.AppName), "" + return fmt.Sprintf("%s/%s/%s", strings.ToLower(application.Team.Name), environmentIdentifier, strings.ToLower(application.AppName)), "" } - return fmt.Sprintf("%s/%s/%s", application.Team.Name, environmentIdentifier, application.AppName), - fmt.Sprintf("%s/%s/%s", application.Team.Name, environmentIdentifier2, application.AppName) + return fmt.Sprintf("%s/%s/%s", strings.ToLower(application.Team.Name), environmentIdentifier, strings.ToLower(application.AppName)), + fmt.Sprintf("%s/%s/%s", strings.ToLower(application.Team.Name), 
environmentIdentifier2, strings.ToLower(application.AppName)) } func (impl EnforcerUtilImpl) GetHelmObjectByAppNameAndEnvId(appName string, envId int) (string, string) { @@ -411,7 +412,7 @@ func (impl EnforcerUtilImpl) GetHelmObjectByAppNameAndEnvId(appName string, envI } } if environmentIdentifier2 == "" { - return fmt.Sprintf("%s/%s/%s", application.Team.Name, environmentIdentifier, application.AppName), "" + return fmt.Sprintf("%s/%s/%s", strings.ToLower(application.Team.Name), environmentIdentifier, strings.ToLower(application.AppName)), "" } //TODO - FIX required for futuristic permission for cluster__* all environment for migrated environment identifier only @@ -419,8 +420,8 @@ func (impl EnforcerUtilImpl) GetHelmObjectByAppNameAndEnvId(appName string, envI if !strings.HasPrefix(env.EnvironmentIdentifier, fmt.Sprintf("%s__", env.Cluster.ClusterName)) { environmentIdentifier = fmt.Sprintf("%s__%s", env.Cluster.ClusterName, env.EnvironmentIdentifier) }*/ - return fmt.Sprintf("%s/%s/%s", application.Team.Name, environmentIdentifier, application.AppName), - fmt.Sprintf("%s/%s/%s", application.Team.Name, environmentIdentifier2, application.AppName) + return fmt.Sprintf("%s/%s/%s", strings.ToLower(application.Team.Name), environmentIdentifier, strings.ToLower(application.AppName)), + fmt.Sprintf("%s/%s/%s", strings.ToLower(application.Team.Name), environmentIdentifier2, strings.ToLower(application.AppName)) } func (impl EnforcerUtilImpl) GetHelmObjectByProjectIdAndEnvId(teamId int, envId int) (string, string) { @@ -451,7 +452,7 @@ func (impl EnforcerUtilImpl) GetHelmObjectByProjectIdAndEnvId(teamId int, envId } if environmentIdentifier2 == "" { - return fmt.Sprintf("%s/%s/%s", team.Name, environmentIdentifier, "*"), "" + return fmt.Sprintf("%s/%s/%s", strings.ToLower(team.Name), environmentIdentifier, "*"), "" } //TODO - FIX required for futuristic permission for cluster__* all environment for migrated environment identifier only @@ -459,8 +460,8 @@ func (impl 
EnforcerUtilImpl) GetHelmObjectByProjectIdAndEnvId(teamId int, envId if !strings.HasPrefix(env.EnvironmentIdentifier, fmt.Sprintf("%s__", env.Cluster.ClusterName)) { environmentIdentifier = fmt.Sprintf("%s__%s", env.Cluster.ClusterName, env.EnvironmentIdentifier) }*/ - return fmt.Sprintf("%s/%s/%s", team.Name, environmentIdentifier, "*"), - fmt.Sprintf("%s/%s/%s", team.Name, environmentIdentifier2, "*") + return fmt.Sprintf("%s/%s/%s", strings.ToLower(team.Name), environmentIdentifier, "*"), + fmt.Sprintf("%s/%s/%s", strings.ToLower(team.Name), environmentIdentifier2, "*") } func (impl EnforcerUtilImpl) GetAppRBACNameByTeamIdAndAppId(teamId int, appId int) string { @@ -474,7 +475,7 @@ func (impl EnforcerUtilImpl) GetAppRBACNameByTeamIdAndAppId(teamId int, appId in impl.logger.Errorw("error on fetching data for rbac object", "err", err) return fmt.Sprintf("%s/%s", "", "") } - return fmt.Sprintf("%s/%s", team.Name, application.AppName) + return fmt.Sprintf("%s/%s", strings.ToLower(team.Name), strings.ToLower(application.AppName)) } func (impl EnforcerUtilImpl) GetRBACNameForClusterEntity(clusterName string, resourceIdentifier k8s.ResourceIdentifier) (resourceName, objectName string) { @@ -503,7 +504,7 @@ func (impl EnforcerUtilImpl) GetAppObjectByCiPipelineIds(ciPipelineIds []int) ma } for _, pipeline := range models { if _, ok := objects[pipeline.Id]; !ok { - appObject := fmt.Sprintf("%s/%s", pipeline.App.Team.Name, pipeline.App.AppName) + appObject := fmt.Sprintf("%s/%s", strings.ToLower(pipeline.App.Team.Name), strings.ToLower(pipeline.App.AppName)) objects[pipeline.Id] = appObject } } @@ -519,8 +520,8 @@ func (impl EnforcerUtilImpl) GetAppAndEnvObjectByPipelineIds(cdPipelineIds []int } for _, pipeline := range models { if _, ok := objects[pipeline.Id]; !ok { - appObject := fmt.Sprintf("%s/%s", pipeline.App.Team.Name, pipeline.App.AppName) - envObject := fmt.Sprintf("%s/%s", pipeline.Environment.EnvironmentIdentifier, pipeline.App.AppName) + appObject := 
fmt.Sprintf("%s/%s", strings.ToLower(pipeline.App.Team.Name), strings.ToLower(pipeline.App.AppName)) + envObject := fmt.Sprintf("%s/%s", strings.ToLower(pipeline.Environment.EnvironmentIdentifier), strings.ToLower(pipeline.App.AppName)) objects[pipeline.Id] = []string{appObject, envObject} } } @@ -535,7 +536,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsForAllAppsWithMatchingAppName(appName } for _, item := range result { if _, ok := objects[item.Id]; !ok { - objects[item.Id] = fmt.Sprintf("%s/%s", item.Team.Name, item.AppName) + objects[item.Id] = fmt.Sprintf("%s/%s", strings.ToLower(item.Team.Name), strings.ToLower(item.AppName)) } } return objects @@ -558,8 +559,8 @@ func (impl EnforcerUtilImpl) GetAppAndEnvObjectByPipeline(cdPipelines []*bean.CD } for _, pipeline := range cdPipelines { if _, ok := objects[pipeline.Id]; !ok { - appObject := fmt.Sprintf("%s/%s", teamMap[pipeline.TeamId], pipeline.AppName) - envObject := fmt.Sprintf("%s/%s", pipeline.EnvironmentIdentifier, pipeline.AppName) + appObject := fmt.Sprintf("%s/%s", strings.ToLower(teamMap[pipeline.TeamId]), strings.ToLower(pipeline.AppName)) + envObject := fmt.Sprintf("%s/%s", strings.ToLower(pipeline.EnvironmentIdentifier), strings.ToLower(pipeline.AppName)) objects[pipeline.Id] = []string{appObject, envObject} } } @@ -585,8 +586,8 @@ func (impl EnforcerUtilImpl) GetAppAndEnvObjectByDbPipeline(cdPipelines []*pipel } for _, pipeline := range cdPipelines { if _, ok := objects[pipeline.Id]; !ok { - appObject := fmt.Sprintf("%s/%s", teamMap[pipeline.App.TeamId], pipeline.App.AppName) - envObject := fmt.Sprintf("%s/%s", pipeline.Environment.EnvironmentIdentifier, pipeline.App.AppName) + appObject := fmt.Sprintf("%s/%s", strings.ToLower(teamMap[pipeline.App.TeamId]), strings.ToLower(pipeline.App.AppName)) + envObject := fmt.Sprintf("%s/%s", strings.ToLower(pipeline.Environment.EnvironmentIdentifier), strings.ToLower(pipeline.App.AppName)) objects[pipeline.Id] = []string{appObject, envObject} } } @@ -600,11 
+601,11 @@ func (impl EnforcerUtilImpl) GetAllActiveTeamNames() ([]string, error) { return nil, err } for i, teamName := range teamNames { - teamNames[i] = teamName + teamNames[i] = strings.ToLower(teamName) } return teamNames, nil } func (impl EnforcerUtilImpl) GetAppRBACNameByAppAndProjectName(projectName, appName string) string { - return fmt.Sprintf("%s/%s", projectName, appName) + return fmt.Sprintf("%s/%s", strings.ToLower(projectName), strings.ToLower(appName)) } diff --git a/util/rbac/EnforcerUtilHelm.go b/util/rbac/EnforcerUtilHelm.go index 099c14a9e1..ea7bdd8565 100644 --- a/util/rbac/EnforcerUtilHelm.go +++ b/util/rbac/EnforcerUtilHelm.go @@ -8,6 +8,7 @@ import ( "github.com/devtron-labs/devtron/pkg/team" "github.com/go-pg/pg" "go.uber.org/zap" + "strings" ) type EnforcerUtilHelm interface { @@ -47,7 +48,7 @@ func (impl EnforcerUtilHelmImpl) GetHelmObjectByClusterId(clusterId int, namespa if err != nil { return fmt.Sprintf("%s/%s/%s", "", "", "") } - return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, appName) + return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, strings.ToLower(appName)) } func (impl EnforcerUtilHelmImpl) GetHelmObjectByTeamIdAndClusterId(teamId int, clusterId int, namespace string, appName string) string { @@ -59,7 +60,7 @@ func (impl EnforcerUtilHelmImpl) GetHelmObjectByTeamIdAndClusterId(teamId int, c if err != nil { return fmt.Sprintf("%s/%s/%s", "", "", "") } - return fmt.Sprintf("%s/%s__%s/%s", teamObj.Name, cluster.ClusterName, namespace, appName) + return fmt.Sprintf("%s/%s__%s/%s", teamObj.Name, cluster.ClusterName, namespace, strings.ToLower(appName)) } func (impl EnforcerUtilHelmImpl) GetHelmObjectByClusterIdNamespaceAndAppName(clusterId int, namespace string, appName string) (string, string) { @@ -88,27 +89,27 @@ func (impl EnforcerUtilHelmImpl) GetHelmObjectByClusterIdNamespaceAndAppName(clu if app.TeamId == 0 { 
// case if project is not assigned to cli app - return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, appName), "" + return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, strings.ToLower(appName)), "" } else { // case if project is assigned - return fmt.Sprintf("%s/%s__%s/%s", app.Team.Name, cluster.ClusterName, namespace, appName), "" + return fmt.Sprintf("%s/%s__%s/%s", app.Team.Name, cluster.ClusterName, namespace, strings.ToLower(appName)), "" } } if installedApp.App.TeamId == 0 { // for EA apps which have no project assigned to them - return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, appName), - fmt.Sprintf("%s/%s/%s", team.UNASSIGNED_PROJECT, installedApp.Environment.EnvironmentIdentifier, appName) + return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, strings.ToLower(appName)), + fmt.Sprintf("%s/%s/%s", team.UNASSIGNED_PROJECT, installedApp.Environment.EnvironmentIdentifier, strings.ToLower(appName)) } else { if installedApp.EnvironmentId == 0 { // for apps in EA mode, initally env can be 0. 
- return fmt.Sprintf("%s/%s__%s/%s", installedApp.App.Team.Name, cluster.ClusterName, namespace, appName), "" + return fmt.Sprintf("%s/%s__%s/%s", installedApp.App.Team.Name, cluster.ClusterName, namespace, strings.ToLower(appName)), "" } // for apps which are assigned to a project and have env ID - rbacOne := fmt.Sprintf("%s/%s/%s", installedApp.App.Team.Name, installedApp.Environment.EnvironmentIdentifier, appName) - rbacTwo := fmt.Sprintf("%s/%s__%s/%s", installedApp.App.Team.Name, cluster.ClusterName, namespace, appName) + rbacOne := fmt.Sprintf("%s/%s/%s", installedApp.App.Team.Name, installedApp.Environment.EnvironmentIdentifier, strings.ToLower(appName)) + rbacTwo := fmt.Sprintf("%s/%s__%s/%s", installedApp.App.Team.Name, cluster.ClusterName, namespace, strings.ToLower(appName)) if installedApp.Environment.IsVirtualEnvironment { return rbacOne, "" } @@ -124,7 +125,7 @@ func (impl EnforcerUtilHelmImpl) GetAppRBACNameByInstalledAppId(installedAppVers impl.logger.Errorw("error in fetching installed app version data", "err", err) return fmt.Sprintf("%s/%s/%s", "", "", ""), fmt.Sprintf("%s/%s/%s", "", "", "") } - rbacOne := fmt.Sprintf("%s/%s/%s", InstalledApp.App.Team.Name, InstalledApp.Environment.EnvironmentIdentifier, InstalledApp.App.AppName) + rbacOne := fmt.Sprintf("%s/%s/%s", InstalledApp.App.Team.Name, InstalledApp.Environment.EnvironmentIdentifier, strings.ToLower(InstalledApp.App.AppName)) if InstalledApp.Environment.IsVirtualEnvironment { return rbacOne, "" @@ -133,10 +134,11 @@ func (impl EnforcerUtilHelmImpl) GetAppRBACNameByInstalledAppId(installedAppVers var rbacTwo string if !InstalledApp.Environment.IsVirtualEnvironment { if InstalledApp.Environment.EnvironmentIdentifier != InstalledApp.Environment.Cluster.ClusterName+"__"+InstalledApp.Environment.Namespace { - rbacTwo = fmt.Sprintf("%s/%s/%s", InstalledApp.App.Team.Name, InstalledApp.Environment.Cluster.ClusterName+"__"+InstalledApp.Environment.Namespace, InstalledApp.App.AppName) + rbacTwo = 
fmt.Sprintf("%s/%s/%s", InstalledApp.App.Team.Name, InstalledApp.Environment.Cluster.ClusterName+"__"+InstalledApp.Environment.Namespace, strings.ToLower(InstalledApp.App.AppName)) return rbacOne, rbacTwo } } return rbacOne, "" + } diff --git a/wire_gen.go b/wire_gen.go index a095cb7752..ab75504855 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -44,8 +44,7 @@ import ( "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/client/argocdServer/application" "github.com/devtron-labs/devtron/client/argocdServer/cluster" - "github.com/devtron-labs/devtron/client/argocdServer/connection" - repository9 "github.com/devtron-labs/devtron/client/argocdServer/repository" + repository8 "github.com/devtron-labs/devtron/client/argocdServer/repository" "github.com/devtron-labs/devtron/client/cron" "github.com/devtron-labs/devtron/client/dashboard" "github.com/devtron-labs/devtron/client/events" @@ -68,6 +67,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/security" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/internal/util/ArgoUtil" + "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/apiToken" app2 "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/app/status" @@ -100,7 +100,7 @@ import ( delete2 "github.com/devtron-labs/devtron/pkg/delete" "github.com/devtron-labs/devtron/pkg/deploymentGroup" "github.com/devtron-labs/devtron/pkg/devtronResource" - repository8 "github.com/devtron-labs/devtron/pkg/devtronResource/repository" + repository9 
"github.com/devtron-labs/devtron/pkg/devtronResource/repository" "github.com/devtron-labs/devtron/pkg/dockerRegistry" "github.com/devtron-labs/devtron/pkg/externalLink" "github.com/devtron-labs/devtron/pkg/generateManifest" @@ -207,15 +207,15 @@ func InitializeApp() (*App, error) { return nil, err } k8sUtil := k8s.NewK8sUtil(sugaredLogger, runtimeConfig) - connectionConfig, err := connection.GetConfig() + argocdServerConfig, err := argocdServer.GetConfig() if err != nil { return nil, err } - settingsManager, err := connection.SettingsManager(connectionConfig) + settingsManager, err := argocdServer.SettingsManager(argocdServerConfig) if err != nil { return nil, err } - argoCDConnectionManagerImpl, err := connection.NewArgoCDConnectionManagerImpl(sugaredLogger, settingsManager, moduleRepositoryImpl) + argoCDConnectionManagerImpl, err := argocdServer.NewArgoCDConnectionManagerImpl(sugaredLogger, settingsManager, moduleRepositoryImpl) if err != nil { return nil, err } @@ -329,12 +329,28 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } + deploymentTemplateHistoryServiceImpl := history.NewDeploymentTemplateHistoryServiceImpl(sugaredLogger, deploymentTemplateHistoryRepositoryImpl, pipelineRepositoryImpl, chartRepositoryImpl, chartRefRepositoryImpl, envLevelAppMetricsRepositoryImpl, appLevelMetricsRepositoryImpl, userServiceImpl, cdWorkflowRepositoryImpl, variableSnapshotHistoryServiceImpl, variableTemplateParserImpl) + chartWorkingDir := _wireChartWorkingDirValue + globalEnvVariables, err := util3.GetGlobalEnvVariables() + if err != nil { + return nil, err + } + chartTemplateServiceImpl := util.NewChartTemplateServiceImpl(sugaredLogger, chartWorkingDir, httpClient, gitFactory, globalEnvVariables, gitOpsConfigRepositoryImpl, userRepositoryImpl, chartRepositoryImpl) + refChartDir := _wireRefChartDirValue + chartRepoRepositoryImpl := chartRepoRepository.NewChartRepoRepositoryImpl(db) 
+ defaultChart := _wireDefaultChartValue + utilMergeUtil := util.MergeUtil{ + Logger: sugaredLogger, + } + repositoryServiceClientImpl := repository8.NewServiceClientImpl(sugaredLogger, argoCDConnectionManagerImpl) + variableEntityMappingRepositoryImpl := repository7.NewVariableEntityMappingRepository(sugaredLogger, db) + variableEntityMappingServiceImpl := variables.NewVariableEntityMappingServiceImpl(variableEntityMappingRepositoryImpl, sugaredLogger) scopedVariableRepositoryImpl := repository7.NewScopedVariableRepository(db, sugaredLogger) qualifiersMappingRepositoryImpl, err := resourceQualifiers.NewQualifiersMappingRepositoryImpl(db, sugaredLogger) if err != nil { return nil, err } - devtronResourceSearchableKeyRepositoryImpl := repository8.NewDevtronResourceSearchableKeyRepositoryImpl(sugaredLogger, db) + devtronResourceSearchableKeyRepositoryImpl := repository9.NewDevtronResourceSearchableKeyRepositoryImpl(sugaredLogger, db) devtronResourceSearchableKeyServiceImpl, err := devtronResource.NewDevtronResourceSearchableKeyServiceImpl(sugaredLogger, devtronResourceSearchableKeyRepositoryImpl) if err != nil { return nil, err @@ -347,22 +363,6 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - deploymentTemplateHistoryServiceImpl := history.NewDeploymentTemplateHistoryServiceImpl(sugaredLogger, deploymentTemplateHistoryRepositoryImpl, pipelineRepositoryImpl, chartRepositoryImpl, chartRefRepositoryImpl, envLevelAppMetricsRepositoryImpl, appLevelMetricsRepositoryImpl, userServiceImpl, cdWorkflowRepositoryImpl, variableSnapshotHistoryServiceImpl, variableTemplateParserImpl, scopedVariableServiceImpl) - chartWorkingDir := _wireChartWorkingDirValue - globalEnvVariables, err := util3.GetGlobalEnvVariables() - if err != nil { - return nil, err - } - chartTemplateServiceImpl := util.NewChartTemplateServiceImpl(sugaredLogger, chartWorkingDir, httpClient, gitFactory, globalEnvVariables, gitOpsConfigRepositoryImpl, userRepositoryImpl, 
chartRepositoryImpl) - refChartDir := _wireRefChartDirValue - chartRepoRepositoryImpl := chartRepoRepository.NewChartRepoRepositoryImpl(db) - defaultChart := _wireDefaultChartValue - utilMergeUtil := util.MergeUtil{ - Logger: sugaredLogger, - } - repositoryServiceClientImpl := repository9.NewServiceClientImpl(sugaredLogger, argoCDConnectionManagerImpl) - variableEntityMappingRepositoryImpl := repository7.NewVariableEntityMappingRepository(sugaredLogger, db) - variableEntityMappingServiceImpl := variables.NewVariableEntityMappingServiceImpl(variableEntityMappingRepositoryImpl, sugaredLogger) chartServiceImpl := chart.NewChartServiceImpl(chartRepositoryImpl, sugaredLogger, chartTemplateServiceImpl, chartRepoRepositoryImpl, appRepositoryImpl, refChartDir, defaultChart, utilMergeUtil, repositoryServiceClientImpl, chartRefRepositoryImpl, envConfigOverrideRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, environmentRepositoryImpl, pipelineRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, httpClient, deploymentTemplateHistoryServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, scopedVariableServiceImpl) devtronSecretConfig, err := util3.GetDevtronSecretName() if err != nil { @@ -393,24 +393,23 @@ func InitializeApp() (*App, error) { return nil, err } appStatusServiceImpl := appStatus2.NewAppStatusServiceImpl(appStatusRepositoryImpl, sugaredLogger, enforcerImpl, enforcerUtilImpl) - chartGroupDeploymentRepositoryImpl := repository3.NewChartGroupDeploymentRepositoryImpl(db, sugaredLogger) clusterInstalledAppsRepositoryImpl := repository3.NewClusterInstalledAppsRepositoryImpl(db, sugaredLogger) refChartProxyDir := _wireRefChartProxyDirValue appStoreDeploymentCommonServiceImpl := appStoreDeploymentCommon.NewAppStoreDeploymentCommonServiceImpl(sugaredLogger, installedAppRepositoryImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, chartTemplateServiceImpl, refChartProxyDir, 
gitFactory, gitOpsConfigRepositoryImpl) ociRegistryConfigRepositoryImpl := repository5.NewOCIRegistryConfigRepositoryImpl(db) appStoreDeploymentHelmServiceImpl := appStoreDeploymentTool.NewAppStoreDeploymentHelmServiceImpl(sugaredLogger, helmAppServiceImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, helmAppClientImpl, installedAppRepositoryImpl, appStoreDeploymentCommonServiceImpl, ociRegistryConfigRepositoryImpl) - argoClientWrapperServiceImpl := argocdServer.NewArgoClientWrapperServiceImpl(sugaredLogger, applicationServiceClientImpl) - appStoreDeploymentFullModeServiceImpl := appStoreDeploymentFullMode.NewAppStoreDeploymentFullModeServiceImpl(sugaredLogger, chartTemplateServiceImpl, refChartProxyDir, repositoryServiceClientImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, applicationServiceClientImpl, argoK8sClientImpl, gitFactory, acdAuthConfig, globalEnvVariables, installedAppRepositoryImpl, tokenCache, argoUserServiceImpl, gitOpsConfigRepositoryImpl, pipelineStatusTimelineServiceImpl, appStoreDeploymentCommonServiceImpl, argoClientWrapperServiceImpl) - appStoreDeploymentArgoCdServiceImpl := appStoreDeploymentGitopsTool.NewAppStoreDeploymentArgoCdServiceImpl(sugaredLogger, appStoreDeploymentFullModeServiceImpl, applicationServiceClientImpl, chartGroupDeploymentRepositoryImpl, installedAppRepositoryImpl, installedAppVersionHistoryRepositoryImpl, chartTemplateServiceImpl, gitFactory, argoUserServiceImpl, appStoreDeploymentCommonServiceImpl, helmAppServiceImpl, gitOpsConfigRepositoryImpl, appStatusServiceImpl, pipelineStatusTimelineServiceImpl, userServiceImpl, pipelineStatusTimelineRepositoryImpl, appStoreApplicationVersionRepositoryImpl, argoClientWrapperServiceImpl) + appStoreDeploymentFullModeServiceImpl := appStoreDeploymentFullMode.NewAppStoreDeploymentFullModeServiceImpl(sugaredLogger, chartTemplateServiceImpl, refChartProxyDir, repositoryServiceClientImpl, appStoreApplicationVersionRepositoryImpl, 
environmentRepositoryImpl, applicationServiceClientImpl, argoK8sClientImpl, gitFactory, acdAuthConfig, globalEnvVariables, installedAppRepositoryImpl, tokenCache, argoUserServiceImpl, gitOpsConfigRepositoryImpl, pipelineStatusTimelineServiceImpl, appStoreDeploymentCommonServiceImpl) + chartGroupDeploymentRepositoryImpl := repository3.NewChartGroupDeploymentRepositoryImpl(db, sugaredLogger) + appStoreDeploymentArgoCdServiceImpl := appStoreDeploymentGitopsTool.NewAppStoreDeploymentArgoCdServiceImpl(sugaredLogger, appStoreDeploymentFullModeServiceImpl, applicationServiceClientImpl, chartGroupDeploymentRepositoryImpl, installedAppRepositoryImpl, installedAppVersionHistoryRepositoryImpl, chartTemplateServiceImpl, gitFactory, argoUserServiceImpl, appStoreDeploymentCommonServiceImpl, helmAppServiceImpl, gitOpsConfigRepositoryImpl, appStatusServiceImpl, pipelineStatusTimelineServiceImpl, userServiceImpl, pipelineStatusTimelineRepositoryImpl, appStoreApplicationVersionRepositoryImpl) deploymentServiceTypeConfig, err := service.GetDeploymentServiceTypeConfig() if err != nil { return nil, err } - appStoreDeploymentServiceImpl := service.NewAppStoreDeploymentServiceImpl(sugaredLogger, installedAppRepositoryImpl, chartGroupDeploymentRepositoryImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, clusterInstalledAppsRepositoryImpl, appRepositoryImpl, appStoreDeploymentHelmServiceImpl, appStoreDeploymentArgoCdServiceImpl, environmentServiceImpl, clusterServiceImplExtended, helmAppServiceImpl, appStoreDeploymentCommonServiceImpl, globalEnvVariables, installedAppVersionHistoryRepositoryImpl, gitOpsConfigRepositoryImpl, attributesServiceImpl, deploymentServiceTypeConfig, chartTemplateServiceImpl, pubSubClientServiceImpl) + appStoreDeploymentServiceImpl := service.NewAppStoreDeploymentServiceImpl(sugaredLogger, installedAppRepositoryImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, clusterInstalledAppsRepositoryImpl, appRepositoryImpl, 
appStoreDeploymentHelmServiceImpl, appStoreDeploymentArgoCdServiceImpl, environmentServiceImpl, clusterServiceImplExtended, helmAppServiceImpl, appStoreDeploymentCommonServiceImpl, globalEnvVariables, installedAppVersionHistoryRepositoryImpl, gitOpsConfigRepositoryImpl, attributesServiceImpl, deploymentServiceTypeConfig, chartTemplateServiceImpl, pubSubClientServiceImpl) k8sCommonServiceImpl := k8s2.NewK8sCommonServiceImpl(sugaredLogger, k8sUtil, clusterServiceImplExtended) manifestPushConfigRepositoryImpl := repository11.NewManifestPushConfigRepository(sugaredLogger, db) gitOpsManifestPushServiceImpl := app2.NewGitOpsManifestPushServiceImpl(sugaredLogger, chartTemplateServiceImpl, chartServiceImpl, gitOpsConfigRepositoryImpl, gitFactory, pipelineStatusTimelineServiceImpl) - appServiceImpl := app2.NewAppService(envConfigOverrideRepositoryImpl, pipelineOverrideRepositoryImpl, mergeUtil, sugaredLogger, ciArtifactRepositoryImpl, pipelineRepositoryImpl, dbMigrationConfigRepositoryImpl, eventRESTClientImpl, eventSimpleFactoryImpl, applicationServiceClientImpl, tokenCache, acdAuthConfig, enforcerImpl, enforcerUtilImpl, userServiceImpl, appListingRepositoryImpl, appRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, chartRepositoryImpl, ciPipelineMaterialRepositoryImpl, cdWorkflowRepositoryImpl, commonServiceImpl, imageScanDeployInfoRepositoryImpl, imageScanHistoryRepositoryImpl, argoK8sClientImpl, gitFactory, pipelineStrategyHistoryServiceImpl, configMapHistoryServiceImpl, deploymentTemplateHistoryServiceImpl, chartTemplateServiceImpl, refChartDir, chartRefRepositoryImpl, chartServiceImpl, helmAppClientImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, appCrudOperationServiceImpl, configMapHistoryRepositoryImpl, pipelineStrategyHistoryRepositoryImpl, deploymentTemplateHistoryRepositoryImpl, dockerRegistryIpsConfigServiceImpl, 
pipelineStatusTimelineResourcesServiceImpl, pipelineStatusSyncDetailServiceImpl, pipelineStatusTimelineServiceImpl, appServiceConfig, gitOpsConfigRepositoryImpl, appStatusServiceImpl, installedAppRepositoryImpl, appStoreDeploymentServiceImpl, k8sCommonServiceImpl, installedAppVersionHistoryRepositoryImpl, globalEnvVariables, helmAppServiceImpl, manifestPushConfigRepositoryImpl, gitOpsManifestPushServiceImpl, variableSnapshotHistoryServiceImpl, scopedVariableServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, argoClientWrapperServiceImpl) + appServiceImpl := app2.NewAppService(envConfigOverrideRepositoryImpl, pipelineOverrideRepositoryImpl, mergeUtil, sugaredLogger, ciArtifactRepositoryImpl, pipelineRepositoryImpl, dbMigrationConfigRepositoryImpl, eventRESTClientImpl, eventSimpleFactoryImpl, applicationServiceClientImpl, tokenCache, acdAuthConfig, enforcerImpl, enforcerUtilImpl, userServiceImpl, appListingRepositoryImpl, appRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, chartRepositoryImpl, ciPipelineMaterialRepositoryImpl, cdWorkflowRepositoryImpl, commonServiceImpl, imageScanDeployInfoRepositoryImpl, imageScanHistoryRepositoryImpl, argoK8sClientImpl, gitFactory, pipelineStrategyHistoryServiceImpl, configMapHistoryServiceImpl, deploymentTemplateHistoryServiceImpl, chartTemplateServiceImpl, refChartDir, chartRefRepositoryImpl, chartServiceImpl, helmAppClientImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, appCrudOperationServiceImpl, configMapHistoryRepositoryImpl, pipelineStrategyHistoryRepositoryImpl, deploymentTemplateHistoryRepositoryImpl, dockerRegistryIpsConfigServiceImpl, pipelineStatusTimelineResourcesServiceImpl, pipelineStatusSyncDetailServiceImpl, pipelineStatusTimelineServiceImpl, appServiceConfig, gitOpsConfigRepositoryImpl, appStatusServiceImpl, installedAppRepositoryImpl, 
appStoreDeploymentServiceImpl, k8sCommonServiceImpl, installedAppVersionHistoryRepositoryImpl, globalEnvVariables, helmAppServiceImpl, manifestPushConfigRepositoryImpl, gitOpsManifestPushServiceImpl, variableSnapshotHistoryServiceImpl, scopedVariableServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl) validate, err := util.IntValidator() if err != nil { return nil, err @@ -446,10 +445,15 @@ func InitializeApp() (*App, error) { pipelineStageRepositoryImpl := repository11.NewPipelineStageRepository(sugaredLogger, db) globalPluginRepositoryImpl := repository12.NewGlobalPluginRepository(sugaredLogger, db) pipelineStageServiceImpl := pipeline.NewPipelineStageService(sugaredLogger, pipelineStageRepositoryImpl, globalPluginRepositoryImpl, pipelineRepositoryImpl, scopedVariableServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl) - workflowDagExecutorImpl := pipeline.NewWorkflowDagExecutorImpl(sugaredLogger, pipelineRepositoryImpl, cdWorkflowRepositoryImpl, pubSubClientServiceImpl, appServiceImpl, workflowServiceImpl, ciArtifactRepositoryImpl, ciPipelineRepositoryImpl, materialRepositoryImpl, pipelineOverrideRepositoryImpl, userServiceImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, enforcerImpl, enforcerUtilImpl, tokenCache, acdAuthConfig, eventSimpleFactoryImpl, eventRESTClientImpl, cvePolicyRepositoryImpl, imageScanResultRepositoryImpl, appWorkflowRepositoryImpl, prePostCdScriptHistoryServiceImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, pipelineStatusTimelineServiceImpl, ciTemplateRepositoryImpl, ciWorkflowRepositoryImpl, appLabelRepositoryImpl, clientImpl, pipelineStageServiceImpl, k8sCommonServiceImpl, variableSnapshotHistoryServiceImpl, deploymentTemplateHistoryServiceImpl, configMapHistoryServiceImpl, pipelineStrategyHistoryServiceImpl, manifestPushConfigRepositoryImpl, gitOpsManifestPushServiceImpl, ciPipelineMaterialRepositoryImpl, imageScanHistoryRepositoryImpl, 
imageScanDeployInfoRepositoryImpl, appCrudOperationServiceImpl, pipelineConfigRepositoryImpl, dockerRegistryIpsConfigServiceImpl, chartRepositoryImpl, chartTemplateServiceImpl, pipelineStrategyHistoryRepositoryImpl, appRepositoryImpl, deploymentTemplateHistoryRepositoryImpl, argoK8sClientImpl, configMapRepositoryImpl, configMapHistoryRepositoryImpl, refChartDir, helmAppServiceImpl, helmAppClientImpl, chartRefRepositoryImpl, envConfigOverrideRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, dbMigrationConfigRepositoryImpl, mergeUtil, gitOpsConfigRepositoryImpl, gitFactory, applicationServiceClientImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, argoClientWrapperServiceImpl, scopedVariableServiceImpl) + globalPluginServiceImpl := plugin.NewGlobalPluginService(sugaredLogger, globalPluginRepositoryImpl) + dockerRegistryConfigImpl := pipeline.NewDockerRegistryConfigImpl(sugaredLogger, helmAppServiceImpl, dockerArtifactStoreRepositoryImpl, dockerRegistryIpsConfigRepositoryImpl, ociRegistryConfigRepositoryImpl) + imageTagRepositoryImpl := repository.NewImageTagRepository(db, sugaredLogger) + customTagServiceImpl := pkg.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) + pluginInputVariableParserImpl := pipeline.NewPluginInputVariableParserImpl(sugaredLogger, dockerRegistryConfigImpl, customTagServiceImpl) + workflowDagExecutorImpl := pipeline.NewWorkflowDagExecutorImpl(sugaredLogger, pipelineRepositoryImpl, cdWorkflowRepositoryImpl, pubSubClientServiceImpl, appServiceImpl, workflowServiceImpl, ciArtifactRepositoryImpl, ciPipelineRepositoryImpl, materialRepositoryImpl, pipelineOverrideRepositoryImpl, userServiceImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, enforcerImpl, enforcerUtilImpl, tokenCache, acdAuthConfig, eventSimpleFactoryImpl, eventRESTClientImpl, cvePolicyRepositoryImpl, imageScanResultRepositoryImpl, appWorkflowRepositoryImpl, prePostCdScriptHistoryServiceImpl, argoUserServiceImpl, 
pipelineStatusTimelineRepositoryImpl, pipelineStatusTimelineServiceImpl, ciTemplateRepositoryImpl, ciWorkflowRepositoryImpl, appLabelRepositoryImpl, clientImpl, pipelineStageServiceImpl, k8sCommonServiceImpl, variableSnapshotHistoryServiceImpl, globalPluginServiceImpl, pluginInputVariableParserImpl) deploymentGroupAppRepositoryImpl := repository.NewDeploymentGroupAppRepositoryImpl(sugaredLogger, db) deploymentGroupServiceImpl := deploymentGroup.NewDeploymentGroupServiceImpl(appRepositoryImpl, sugaredLogger, pipelineRepositoryImpl, ciPipelineRepositoryImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, deploymentGroupAppRepositoryImpl, ciArtifactRepositoryImpl, appWorkflowRepositoryImpl, workflowDagExecutorImpl) - deploymentConfigServiceImpl := pipeline.NewDeploymentConfigServiceImpl(sugaredLogger, envConfigOverrideRepositoryImpl, chartRepositoryImpl, pipelineRepositoryImpl, envLevelAppMetricsRepositoryImpl, appLevelMetricsRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, configMapHistoryServiceImpl, chartRefRepositoryImpl, variableEntityMappingServiceImpl, scopedVariableServiceImpl, variableTemplateParserImpl) + deploymentConfigServiceImpl := pipeline.NewDeploymentConfigServiceImpl(sugaredLogger, envConfigOverrideRepositoryImpl, chartRepositoryImpl, pipelineRepositoryImpl, envLevelAppMetricsRepositoryImpl, appLevelMetricsRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, configMapHistoryServiceImpl, chartRefRepositoryImpl, variableEntityMappingServiceImpl, scopedVariableServiceImpl) pipelineTriggerRestHandlerImpl := restHandler.NewPipelineRestHandler(appServiceImpl, userServiceImpl, validate, enforcerImpl, teamServiceImpl, sugaredLogger, enforcerUtilImpl, workflowDagExecutorImpl, deploymentGroupServiceImpl, argoUserServiceImpl, deploymentConfigServiceImpl) sseSSE := sse.NewSSE() pipelineTriggerRouterImpl := router.NewPipelineTriggerRouter(pipelineTriggerRestHandlerImpl, sseSSE) @@ -464,8 +468,6 @@ func 
InitializeApp() (*App, error) { ciBuildConfigServiceImpl := pipeline.NewCiBuildConfigServiceImpl(sugaredLogger, ciBuildConfigRepositoryImpl) ciTemplateServiceImpl := pipeline.NewCiTemplateServiceImpl(sugaredLogger, ciBuildConfigServiceImpl, ciTemplateRepositoryImpl, ciTemplateOverrideRepositoryImpl) configMapServiceImpl := pipeline.NewConfigMapServiceImpl(chartRepositoryImpl, sugaredLogger, chartRepoRepositoryImpl, utilMergeUtil, pipelineConfigRepositoryImpl, configMapRepositoryImpl, envConfigOverrideRepositoryImpl, commonServiceImpl, appRepositoryImpl, configMapHistoryServiceImpl, environmentRepositoryImpl) - imageTagRepositoryImpl := repository.NewImageTagRepository(db, sugaredLogger) - customTagServiceImpl := pipeline.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) ciCdPipelineOrchestratorImpl := pipeline.NewCiCdPipelineOrchestrator(appRepositoryImpl, sugaredLogger, materialRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciCdConfig, appWorkflowRepositoryImpl, environmentRepositoryImpl, attributesServiceImpl, appListingRepositoryImpl, appCrudOperationServiceImpl, userAuthServiceImpl, prePostCdScriptHistoryServiceImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, ciTemplateOverrideRepositoryImpl, gitMaterialHistoryServiceImpl, ciPipelineHistoryServiceImpl, ciTemplateServiceImpl, dockerArtifactStoreRepositoryImpl, configMapServiceImpl, customTagServiceImpl, genericNoteServiceImpl) ecrConfig, err := pipeline.GetEcrConfig() if err != nil { @@ -487,7 +489,7 @@ func InitializeApp() (*App, error) { return nil, err } devtronAppCMCSServiceImpl := pipeline.NewDevtronAppCMCSServiceImpl(sugaredLogger, appServiceImpl, attributesRepositoryImpl) - cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, 
appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, gitOpsConfigRepositoryImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, chartDeploymentServiceImpl, chartTemplateServiceImpl, propertiesConfigServiceImpl, appLevelMetricsRepositoryImpl, deploymentTemplateHistoryServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, pipelineDeploymentServiceTypeConfig, applicationServiceClientImpl, devtronAppCMCSServiceImpl) + cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, gitOpsConfigRepositoryImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, chartDeploymentServiceImpl, chartTemplateServiceImpl, propertiesConfigServiceImpl, appLevelMetricsRepositoryImpl, deploymentTemplateHistoryServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, pipelineDeploymentServiceTypeConfig, applicationServiceClientImpl, devtronAppCMCSServiceImpl, customTagServiceImpl) appArtifactManagerImpl := pipeline.NewAppArtifactManagerImpl(sugaredLogger, cdWorkflowRepositoryImpl, userServiceImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl, ciWorkflowRepositoryImpl, pipelineStageServiceImpl, cdPipelineConfigServiceImpl) globalStrategyMetadataChartRefMappingRepositoryImpl := chartRepoRepository.NewGlobalStrategyMetadataChartRefMappingRepositoryImpl(db, sugaredLogger) devtronAppStrategyServiceImpl := 
pipeline.NewDevtronAppStrategyServiceImpl(sugaredLogger, chartRepositoryImpl, globalStrategyMetadataChartRefMappingRepositoryImpl, ciCdPipelineOrchestratorImpl, cdPipelineConfigServiceImpl) @@ -502,14 +504,13 @@ func InitializeApp() (*App, error) { } ciHandlerImpl := pipeline.NewCiHandlerImpl(sugaredLogger, ciServiceImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciWorkflowRepositoryImpl, workflowServiceImpl, ciLogServiceImpl, ciArtifactRepositoryImpl, userServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciPipelineRepositoryImpl, appListingRepositoryImpl, k8sUtil, pipelineRepositoryImpl, enforcerUtilImpl, resourceGroupServiceImpl, environmentRepositoryImpl, imageTaggingServiceImpl, appWorkflowRepositoryImpl, customTagServiceImpl, k8sCommonServiceImpl) gitRegistryConfigImpl := pipeline.NewGitRegistryConfigImpl(sugaredLogger, gitProviderRepositoryImpl, clientImpl) - dockerRegistryConfigImpl := pipeline.NewDockerRegistryConfigImpl(sugaredLogger, helmAppServiceImpl, dockerArtifactStoreRepositoryImpl, dockerRegistryIpsConfigRepositoryImpl, ociRegistryConfigRepositoryImpl) appListingViewBuilderImpl := app2.NewAppListingViewBuilderImpl(sugaredLogger) linkoutsRepositoryImpl := repository.NewLinkoutsRepositoryImpl(sugaredLogger, db) appListingServiceImpl := app2.NewAppListingServiceImpl(sugaredLogger, appListingRepositoryImpl, applicationServiceClientImpl, appRepositoryImpl, appListingViewBuilderImpl, pipelineRepositoryImpl, linkoutsRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, environmentRepositoryImpl, argoUserServiceImpl, envConfigOverrideRepositoryImpl, chartRepositoryImpl, ciPipelineRepositoryImpl, dockerRegistryIpsConfigServiceImpl) deploymentEventHandlerImpl := app2.NewDeploymentEventHandlerImpl(sugaredLogger, appListingServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl) cdHandlerImpl := pipeline.NewCdHandlerImpl(sugaredLogger, userServiceImpl, 
cdWorkflowRepositoryImpl, ciLogServiceImpl, ciArtifactRepositoryImpl, ciPipelineMaterialRepositoryImpl, pipelineRepositoryImpl, environmentRepositoryImpl, ciWorkflowRepositoryImpl, helmAppServiceImpl, pipelineOverrideRepositoryImpl, workflowDagExecutorImpl, appListingServiceImpl, appListingRepositoryImpl, pipelineStatusTimelineRepositoryImpl, applicationServiceClientImpl, argoUserServiceImpl, deploymentEventHandlerImpl, eventRESTClientImpl, pipelineStatusTimelineResourcesServiceImpl, pipelineStatusSyncDetailServiceImpl, pipelineStatusTimelineServiceImpl, appServiceImpl, appStatusServiceImpl, enforcerUtilImpl, installedAppRepositoryImpl, installedAppVersionHistoryRepositoryImpl, appRepositoryImpl, resourceGroupServiceImpl, imageTaggingServiceImpl, k8sUtil, workflowServiceImpl) appWorkflowServiceImpl := appWorkflow2.NewAppWorkflowServiceImpl(sugaredLogger, appWorkflowRepositoryImpl, ciCdPipelineOrchestratorImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, enforcerUtilImpl, resourceGroupServiceImpl) - appCloneServiceImpl := appClone.NewAppCloneServiceImpl(sugaredLogger, pipelineBuilderImpl, materialRepositoryImpl, chartServiceImpl, configMapServiceImpl, appWorkflowServiceImpl, appListingServiceImpl, propertiesConfigServiceImpl, ciTemplateOverrideRepositoryImpl, pipelineStageServiceImpl, ciTemplateServiceImpl, appRepositoryImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, appWorkflowRepositoryImpl) + appCloneServiceImpl := appClone.NewAppCloneServiceImpl(sugaredLogger, pipelineBuilderImpl, materialRepositoryImpl, chartServiceImpl, configMapServiceImpl, appWorkflowServiceImpl, appListingServiceImpl, propertiesConfigServiceImpl, ciTemplateOverrideRepositoryImpl, pipelineStageServiceImpl, ciTemplateServiceImpl, appRepositoryImpl) deploymentTemplateRepositoryImpl := repository.NewDeploymentTemplateRepositoryImpl(db, sugaredLogger) deploymentTemplateServiceImpl := generateManifest.NewDeploymentTemplateServiceImpl(sugaredLogger, chartServiceImpl, 
appListingServiceImpl, appListingRepositoryImpl, deploymentTemplateRepositoryImpl, helmAppServiceImpl, chartRepositoryImpl, chartTemplateServiceImpl, helmAppClientImpl, k8sUtil, propertiesConfigServiceImpl, deploymentTemplateHistoryServiceImpl, environmentRepositoryImpl, appRepositoryImpl) imageScanObjectMetaRepositoryImpl := security.NewImageScanObjectMetaRepositoryImpl(db, sugaredLogger) @@ -744,7 +745,6 @@ func InitializeApp() (*App, error) { externalLinkServiceImpl := externalLink.NewExternalLinkServiceImpl(sugaredLogger, externalLinkMonitoringToolRepositoryImpl, externalLinkIdentifierMappingRepositoryImpl, externalLinkRepositoryImpl) externalLinkRestHandlerImpl := externalLink2.NewExternalLinkRestHandlerImpl(sugaredLogger, externalLinkServiceImpl, userServiceImpl, enforcerImpl, enforcerUtilImpl) externalLinkRouterImpl := externalLink2.NewExternalLinkRouterImpl(externalLinkRestHandlerImpl) - globalPluginServiceImpl := plugin.NewGlobalPluginService(sugaredLogger, globalPluginRepositoryImpl) globalPluginRestHandlerImpl := restHandler.NewGlobalPluginRestHandler(sugaredLogger, globalPluginServiceImpl, enforcerUtilImpl, enforcerImpl, pipelineBuilderImpl) globalPluginRouterImpl := router.NewGlobalPluginRouter(sugaredLogger, globalPluginRestHandlerImpl) moduleRestHandlerImpl := module2.NewModuleRestHandlerImpl(sugaredLogger, moduleServiceImpl, userServiceImpl, enforcerImpl, validate) From ee8fbbf0f1046c4f1e827397630a76fed3b84a36 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 26 Oct 2023 11:19:33 +0530 Subject: [PATCH 061/143] Revert "Revert "Merge branch 'refactor-tag-generation' into custom-tag-cd"" This reverts commit 5d131dba6708bb7672b17cb2a5bf25fcce321cd9. 
--- .github/workflows/pr-issue-validator.yaml | 2 +- CHANGELOG/release-notes-v0.6.23.md | 74 + Wire.go | 17 +- api/bean/ConfigMapAndSecret.go | 8 + api/bean/ValuesOverrideRequest.go | 2 +- api/k8s/capacity/k8sCapacityRestHandler.go | 5 +- api/restHandler/CoreAppRestHandler.go | 96 +- api/restHandler/PipelineHistoryRestHandler.go | 18 +- api/restHandler/PipelineTriggerRestHandler.go | 8 +- .../app/BuildPipelineRestHandler.go | 3 +- .../app/DeploymentPipelineRestHandler.go | 13 +- api/router/pubsub/ApplicationStatusHandler.go | 2 +- charts/devtron/Chart.yaml | 4 +- charts/devtron/devtron-bom.yaml | 56 +- charts/devtron/templates/NOTES.txt | 2 + charts/devtron/templates/gitsensor.yaml | 160 ++ charts/devtron/templates/lens.yaml | 115 + charts/devtron/templates/migrator.yaml | 2 +- charts/devtron/templates/workflow.yaml | 843 ++++++- charts/devtron/values.yaml | 57 +- .../argocdServer/ArgoClientWrapperService.go | 39 + client/argocdServer/Version.go | 5 +- .../argocdServer/application/Application.go | 6 +- client/argocdServer/bean/bean.go | 3 + client/argocdServer/cluster/Cluster.go | 6 +- .../argocdServer/{ => connection}/Config.go | 2 +- .../{ => connection}/Connection.go | 2 +- client/argocdServer/{ => connection}/Tls.go | 2 +- client/argocdServer/{ => connection}/Token.go | 2 +- client/argocdServer/{ => connection}/proxy.go | 2 +- .../{ => connection}/proxy_test.go | 2 +- client/argocdServer/repository/Repository.go | 6 +- client/argocdServer/session/Session.go | 4 +- cmd/external-app/wire.go | 7 + cmd/external-app/wire_gen.go | 3 +- docs/SUMMARY.md | 4 +- docs/reference/glossary.md | 6 +- .../creating-application/git-material.md | 149 +- .../deploying-application/triggering-ci.md | 17 + .../integrations/build-and-deploy-ci-cd.md | 4 + docs/user-guide/use-cases/oci-pull.md | 73 + .../pipelineConfig/CiWorkflowRepository.go | 2 +- .../security/ImageScanDeployInfoRepository.go | 9 +- internal/util/MergeUtil.go | 105 +- manifests/install/devtron-installer.yaml | 2 +- 
manifests/installation-script | 73 +- manifests/release.txt | 2 +- manifests/version.txt | 2 +- manifests/yamls/dashboard.yaml | 2 +- manifests/yamls/devtron.yaml | 11 +- manifests/yamls/gitsensor.yaml | 4 +- manifests/yamls/kubelink.yaml | 2 +- manifests/yamls/kubewatch.yaml | 2 +- manifests/yamls/migrator.yaml | 6 +- manifests/yamls/notifier.yaml | 2 +- manifests/yamls/serviceaccount.yaml | 26 +- pkg/app/AppCrudOperationService.go | 9 +- pkg/app/AppService.go | 1913 +--------------- pkg/appClone/AppCloneService.go | 277 +-- .../AppStoreDeploymentFullModeService.go | 9 + .../service/AppStoreDeploymentService.go | 31 +- .../tool/AppStoreDeploymentHelmService.go | 7 + .../gitops/AppStoreDeploymentArgoCdService.go | 24 +- pkg/auth/UserAuthOidcHelper.go | 6 +- pkg/bean/app.go | 3 + pkg/chart/ChartService.go | 37 +- pkg/cluster/ClusterCronService.go | 2 +- pkg/cluster/ClusterService.go | 14 + pkg/cluster/ClusterServiceExtended.go | 22 +- pkg/cluster/repository/ClusterRepository.go | 11 + .../DeployementTemplateService.go | 67 +- pkg/k8s/K8sCommonService.go | 56 +- pkg/k8s/capacity/bean/bean.go | 1 - pkg/k8s/capacity/k8sCapacityService.go | 4 +- pkg/pipeline/BuildPipelineConfigService.go | 17 +- pkg/pipeline/CdHandler.go | 12 +- pkg/pipeline/CiCdConfig.go | 44 + pkg/pipeline/CiCdPipelineOrchestrator.go | 38 +- pkg/pipeline/CiHandler.go | 11 +- pkg/pipeline/CiService.go | 13 +- pkg/{ => pipeline}/CustomTagService.go | 78 +- pkg/pipeline/DeploymentConfigService.go | 58 +- .../DeploymentPipelineConfigService.go | 59 +- pkg/pipeline/PipelineStageService.go | 12 +- pkg/pipeline/WebhookService.go | 5 +- pkg/pipeline/WorkflowDagExecutor.go | 2005 ++++++++++++++++- pkg/pipeline/WorkflowService.go | 34 +- pkg/pipeline/WorkflowUtils.go | 19 +- pkg/pipeline/bean/CustomTagService.go | 25 + .../DeployedConfigurationHistoryService.go | 19 +- .../DeploymentTemplateHistoryService.go | 79 +- pkg/pipeline/history/bean.go | 20 +- .../repository/PipelineStageRepository.go | 2 +- 
pkg/resourceQualifiers/bean.go | 3 + pkg/resourceQualifiers/constants.go | 10 +- pkg/user/casbin/rbac.go | 7 +- pkg/util/artifact-utils.go | 12 + pkg/variables/ScopedVariableService.go | 79 +- pkg/variables/ScopedVariableValidator.go | 5 + pkg/variables/models/variable-payload.go | 17 + .../parsers/VariableTemplateParser.go | 70 +- pkg/variables/parsers/bean.go | 24 +- .../repository/ScopedVariableRepository.go | 16 + releasenotes.md | 98 +- .../cronjob-chart_1-2-0/schema.json | 276 ++- .../deployment-chart_1-0-0/schema.json | 216 +- .../deployment-chart_1-0-0/values.yaml | 6 +- .../deployment-chart_1-1-0/schema.json | 234 +- .../deployment-chart_1-1-0/values.yaml | 6 +- .../deployment-chart_4-18-0/schema.json | 252 ++- .../reference-chart_3-10-0/schema.json | 216 +- .../reference-chart_3-11-0/schema.json | 216 +- .../reference-chart_3-12-0/schema.json | 216 +- .../reference-chart_3-13-0/schema.json | 216 +- .../reference-chart_3-9-0/schema.json | 216 +- .../reference-chart_4-10-0/schema.json | 216 +- .../reference-chart_4-11-0/schema.json | 216 +- .../reference-chart_4-12-0/schema.json | 216 +- .../reference-chart_4-13-0/schema.json | 216 +- .../reference-chart_4-14-0/schema.json | 216 +- .../reference-chart_4-14-0/values.yaml | 6 +- .../reference-chart_4-15-0/schema.json | 216 +- .../reference-chart_4-15-0/values.yaml | 6 +- .../reference-chart_4-16-0/schema.json | 216 +- .../reference-chart_4-16-0/values.yaml | 6 +- .../reference-chart_4-17-0/schema.json | 234 +- .../reference-chart_4-17-0/values.yaml | 6 +- .../reference-chart_4-18-0/schema.json | 246 +- .../reference-chart_5-0-0/schema.json | 246 +- .../statefulset-chart_4-18-0/schema.json | 246 +- .../statefulset-chart_4-18-0/values.yaml | 6 +- .../statefulset-chart_5-0-0/schema.json | 239 +- .../statefulset-chart_5-0-0/values.yaml | 6 +- .../workflow-chart_1-0-0/schema.json | 216 +- ...move_index_image_scan_deploy_info.down.sql | 2 + ...remove_index_image_scan_deploy_info.up.sql | 2 + ...down.sql => 
182_custom_image_tag.down.sql} | 0 ...tag.up.sql => 182_custom_image_tag.up.sql} | 0 util/argo/ArgoUserService.go | 5 +- util/context-utils.go | 23 + util/rbac/EnforcerUtil.go | 93 +- util/rbac/EnforcerUtilHelm.go | 24 +- wire_gen.go | 74 +- 143 files changed, 9124 insertions(+), 3609 deletions(-) create mode 100644 CHANGELOG/release-notes-v0.6.23.md create mode 100644 charts/devtron/templates/gitsensor.yaml create mode 100644 charts/devtron/templates/lens.yaml create mode 100644 client/argocdServer/ArgoClientWrapperService.go create mode 100644 client/argocdServer/bean/bean.go rename client/argocdServer/{ => connection}/Config.go (97%) rename client/argocdServer/{ => connection}/Connection.go (99%) rename client/argocdServer/{ => connection}/Tls.go (99%) rename client/argocdServer/{ => connection}/Token.go (98%) rename client/argocdServer/{ => connection}/proxy.go (99%) rename client/argocdServer/{ => connection}/proxy_test.go (98%) create mode 100644 docs/user-guide/use-cases/oci-pull.md rename pkg/{ => pipeline}/CustomTagService.go (71%) create mode 100644 pkg/pipeline/bean/CustomTagService.go create mode 100644 pkg/util/artifact-utils.go create mode 100644 scripts/sql/181_remove_index_image_scan_deploy_info.down.sql create mode 100644 scripts/sql/181_remove_index_image_scan_deploy_info.up.sql rename scripts/sql/{177_custom_image_tag.down.sql => 182_custom_image_tag.down.sql} (100%) rename scripts/sql/{177_custom_image_tag.up.sql => 182_custom_image_tag.up.sql} (100%) create mode 100644 util/context-utils.go diff --git a/.github/workflows/pr-issue-validator.yaml b/.github/workflows/pr-issue-validator.yaml index 0ca7060cc4..55ee469178 100644 --- a/.github/workflows/pr-issue-validator.yaml +++ b/.github/workflows/pr-issue-validator.yaml @@ -39,7 +39,7 @@ jobs: TITLE: ${{ github.event.pull_request.title }} run: | set -x - if [[ "$TITLE" == *"doc:"* || "$TITLE" == *"docs:"* || "$TITLE" == *"chore:"* ]]; then + if [[ "$TITLE" == *"doc:"* || "$TITLE" == *"docs:"* || 
"$TITLE" == *"chore:"* || "$TITLE" == *"release:"* || "$TITLE" == *"Release:"* ]]; then echo "Skipping validation as this is a PR for documentation or chore." gh pr edit $PRNUM --remove-label "PR:Issue-verification-failed" gh pr edit $PRNUM --add-label "PR:Ready-to-Review" diff --git a/CHANGELOG/release-notes-v0.6.23.md b/CHANGELOG/release-notes-v0.6.23.md new file mode 100644 index 0000000000..e7cc215a5c --- /dev/null +++ b/CHANGELOG/release-notes-v0.6.23.md @@ -0,0 +1,74 @@ +## v0.6.23 + + + +## Bugs +- fix: DT19-v1 bug fixes (#3962) +- fix: ci pod request correction (#3980) +- fix: pipelineOverride id being sent instead of pipelineId (#3984) +- fix: Iam role handling script for plugin pull image from CR (#3955) +- fix: Deployment Template HCL parsing with % keyword (#4012) +- fix: handled releaseNotExists case for helm type cd pipeline resource tree fetch (#4016) +- fix: auto post cd not working in case of multiple parallel gitOps pipeline (#4018) +- fix: handled error in bulk trigger deploy (#4034) +- fix: The manager(non-admin user) of the application is unable to select a list of apps when assigning permissions (#4053) +- fix: ci job handling in app create api (#4054) +- fix: Deploying currently Active image using TriggerDeploy API from devtctl tool is broken (#4056) +- fix: Unable to delete ci pipeline in case you configure multi git (#4072) +- fix: env for specific deployment (#4085) +- fix: update build configuration fix (#4093) +- fix: Artifacts filter in CD trigger view (#4064) +- fix: Bugathon DT-19 version-2 fixes (#4105) +- fix: App Labels node selector not getting attach in ci-workflow (#4084) +- fix: Update cd pipeline create empty pre post cd steps (#4113) +- fix: normal Refresh after triggering gitops deployment to avoid sync delay in argo (#4066) +- fix: helm chart delete when no rows are found (#4124) +- fix: Unable to abort pre-cd and post-cd workflow (#4121) +- fix: Helm Apps permissions do not allow Terminal or Logs view (#4110) +- fix: port 
service mapping (#4132) +## Enhancements +- feat: Helm async install (#3856) +- feat: handle CI success event auto trigger in batch (#3951) +- feat: added env variable to skip gitops validation on create/update (#3956) +- feat: added flag to configure ecr repo creation (#3963) +- feat: Ability to change branch for all selected applications during bulk build from Application Groups (#3955) +- feat: Variables support in pre-post CI, CD and Jobs (#3911) +- feat: Poll Images from ECR Container Repository Plugin (#3971) +- feat: resource groups CRUD and environment filtering (#3974) +- feat: Scoped variables primitive handling (#4033) +- feat: adding DEVTRON_APP_NAME system variable for deployment template (#4041) +- feat: wf pod restart (#3892) +- feat: added deduction for system variables (#4075) +- feat: manifest comparision (#3844) +- feat: multiple images handling for single workflow for ECR Plugin Poll Images (#4027) +- feat: Jenkins plugin migration (#4039) +- feat: clone cd pipelines while cloning app across project (#4087) +## Documentation +- doc: Glossary of jargonish terms for layman in the context of Devtron (#3820) +- docs: Ephemeral Container Doc (#3912) +- docs: New Image Alignment in Ephemeral doc (#3959) +- docs: Snapshot updation in PVC docs + PreBuild CI-CD (#3964) +- doc: Fixed issuer url in okta docs (#4062) +- docs: Config Approval Draft (#3981) +- docs: Modified Existing Container Registry Doc (#4048) +- docs: Added OCI Pull in Usecases (#4112) +## Others +- chore: added workflow to escalate pager-duty issue (#3927) +- chore: changed loop from for to while (#3928) +- chore: scheduled escalate pager duty issue workflow (#3933) +- chore: added log config for dev mode (#3953) +- chore: minor correction in devtron reference charts (#3957) +- chore: workflow refactoring (#3714) +- chore: pr-issue-validator permissions fix (#3967) +- chore: added CODEOWNERS (#3966) +- chore: Scoped variable refactoring (#3977) +- chore: modified labels of keda 
autoscale object in deployment chart (#3999) +- chore: Update pr-issue-validator.yaml (#3854) +- chore: refactoring around PipelineBuilder (#4043) +- chore: moved k8s library to common-lib and added scripts for adding sshTunnel config to clusters (#3848) +- chore: Add pager-duty issue template (#3988) +- chore: first cut refactor ci-pipeline (#4091) +- chore: refactored appartifact manager and cimaterialconfigservice (#4096) +- chore: Remove the EnvVariablesFromFieldPath from values.yaml in refcharts (#4111) +- chore: Updated schema for Scope Variable (#4079) +- chore: skip validation for release PRs (#4128) diff --git a/Wire.go b/Wire.go index daa6421273..248c021b56 100644 --- a/Wire.go +++ b/Wire.go @@ -55,6 +55,7 @@ import ( "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/client/argocdServer/application" cluster2 "github.com/devtron-labs/devtron/client/argocdServer/cluster" + "github.com/devtron-labs/devtron/client/argocdServer/connection" repository2 "github.com/devtron-labs/devtron/client/argocdServer/repository" session2 "github.com/devtron-labs/devtron/client/argocdServer/session" "github.com/devtron-labs/devtron/client/cron" @@ -79,7 +80,6 @@ import ( security2 "github.com/devtron-labs/devtron/internal/sql/repository/security" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/internal/util/ArgoUtil" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/app/status" "github.com/devtron-labs/devtron/pkg/appClone" @@ -172,10 +172,10 @@ func InitializeApp() (*App, error) { wire.Value(appStoreBean.RefChartProxyDir("scripts/devtron-reference-helm-charts")), 
wire.Value(chart.DefaultChart("reference-app-rolling")), wire.Value(util.ChartWorkingDir("/tmp/charts/")), - argocdServer.SettingsManager, + connection.SettingsManager, //auth.GetConfig, - argocdServer.GetConfig, + connection.GetConfig, wire.Bind(new(session2.ServiceClient), new(*middleware.LoginService)), sse.NewSSE, @@ -289,8 +289,8 @@ func InitializeApp() (*App, error) { repository.NewImageTagRepository, wire.Bind(new(repository.ImageTagRepository), new(*repository.ImageTagRepositoryImpl)), - pkg.NewCustomTagService, - wire.Bind(new(pkg.CustomTagService), new(*pkg.CustomTagServiceImpl)), + pipeline.NewCustomTagService, + wire.Bind(new(pipeline.CustomTagService), new(*pipeline.CustomTagServiceImpl)), repository.NewGitProviderRepositoryImpl, wire.Bind(new(repository.GitProviderRepository), new(*repository.GitProviderRepositoryImpl)), @@ -821,8 +821,8 @@ func InitializeApp() (*App, error) { wire.Bind(new(pipeline.PipelineStageService), new(*pipeline.PipelineStageServiceImpl)), //plugin ends - argocdServer.NewArgoCDConnectionManagerImpl, - wire.Bind(new(argocdServer.ArgoCDConnectionManager), new(*argocdServer.ArgoCDConnectionManagerImpl)), + connection.NewArgoCDConnectionManagerImpl, + wire.Bind(new(connection.ArgoCDConnectionManager), new(*connection.ArgoCDConnectionManagerImpl)), argo.NewArgoUserServiceImpl, wire.Bind(new(argo.ArgoUserService), new(*argo.ArgoUserServiceImpl)), util2.GetDevtronSecretName, @@ -951,6 +951,9 @@ func InitializeApp() (*App, error) { devtronResource.NewDevtronResourceSearchableKeyServiceImpl, wire.Bind(new(devtronResource.DevtronResourceService), new(*devtronResource.DevtronResourceSearchableKeyServiceImpl)), + argocdServer.NewArgoClientWrapperServiceImpl, + wire.Bind(new(argocdServer.ArgoClientWrapperService), new(*argocdServer.ArgoClientWrapperServiceImpl)), + pipeline.NewPluginInputVariableParserImpl, wire.Bind(new(pipeline.PluginInputVariableParser), new(*pipeline.PluginInputVariableParserImpl)), ) diff --git 
a/api/bean/ConfigMapAndSecret.go b/api/bean/ConfigMapAndSecret.go index d35d58b824..b600ecd5ef 100644 --- a/api/bean/ConfigMapAndSecret.go +++ b/api/bean/ConfigMapAndSecret.go @@ -19,6 +19,7 @@ package bean import ( "encoding/json" + "github.com/devtron-labs/devtron/util" ) type ConfigMapRootJson struct { @@ -61,3 +62,10 @@ func (configSecret ConfigSecretMap) GetDataMap() (map[string]string, error) { err := json.Unmarshal(configSecret.Data, &datamap) return datamap, err } +func (configSecretJson ConfigSecretJson) GetDereferencedSecrets() []ConfigSecretMap { + return util.GetDeReferencedArray(configSecretJson.Secrets) +} + +func (configSecretJson *ConfigSecretJson) SetReferencedSecrets(secrets []ConfigSecretMap) { + configSecretJson.Secrets = util.GetReferencedArray(secrets) +} diff --git a/api/bean/ValuesOverrideRequest.go b/api/bean/ValuesOverrideRequest.go index 5dc7b24682..7bd30e8082 100644 --- a/api/bean/ValuesOverrideRequest.go +++ b/api/bean/ValuesOverrideRequest.go @@ -72,7 +72,7 @@ type ValuesOverrideRequest struct { AppName string `json:"-"` PipelineName string `json:"-"` DeploymentAppType string `json:"-"` - ImageTag string `json:"-"` + Image string `json:"-"` } type BulkCdDeployEvent struct { diff --git a/api/k8s/capacity/k8sCapacityRestHandler.go b/api/k8s/capacity/k8sCapacityRestHandler.go index 8197408057..56d0714a1b 100644 --- a/api/k8s/capacity/k8sCapacityRestHandler.go +++ b/api/k8s/capacity/k8sCapacityRestHandler.go @@ -61,7 +61,7 @@ func (handler *K8sCapacityRestHandlerImpl) GetClusterListRaw(w http.ResponseWrit return } token := r.Header.Get("token") - clusters, err := handler.clusterService.FindAll() + clusters, err := handler.clusterService.FindAllExceptVirtual() if err != nil { handler.logger.Errorw("error in getting all clusters", "err", err) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) @@ -83,7 +83,6 @@ func (handler *K8sCapacityRestHandlerImpl) GetClusterListRaw(w http.ResponseWrit Id: cluster.Id, 
Name: cluster.ClusterName, ErrorInConnection: cluster.ErrorInConnecting, - IsVirtualCluster: cluster.IsVirtualCluster, } clusterDetailList = append(clusterDetailList, clusterDetail) } @@ -102,7 +101,7 @@ func (handler *K8sCapacityRestHandlerImpl) GetClusterListWithDetail(w http.Respo return } token := r.Header.Get("token") - clusters, err := handler.clusterService.FindAll() + clusters, err := handler.clusterService.FindAllExceptVirtual() if err != nil { handler.logger.Errorw("error in getting all clusters", "err", err) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) diff --git a/api/restHandler/CoreAppRestHandler.go b/api/restHandler/CoreAppRestHandler.go index dce1a87c23..5427d8b0d7 100644 --- a/api/restHandler/CoreAppRestHandler.go +++ b/api/restHandler/CoreAppRestHandler.go @@ -1192,45 +1192,50 @@ func (handler CoreAppRestHandlerImpl) deleteApp(ctx context.Context, appId int, // delete all CD pipelines for app starts cdPipelines, err := handler.pipelineBuilder.GetCdPipelinesForApp(appId) - if err != nil { + if err != nil && err != pg.ErrNoRows { handler.logger.Errorw("service err, GetCdPipelines in DeleteApp", "err", err, "appId", appId) return err } - - for _, cdPipeline := range cdPipelines.Pipelines { - cdPipelineDeleteRequest := &bean.CDPatchRequest{ - AppId: appId, - UserId: userId, - Action: bean.CD_DELETE, - ForceDelete: true, - NonCascadeDelete: false, - Pipeline: cdPipeline, - } - _, err = handler.pipelineBuilder.PatchCdPipelines(cdPipelineDeleteRequest, ctx) - if err != nil { - handler.logger.Errorw("err in deleting cd pipeline in DeleteApp", "err", err, "payload", cdPipelineDeleteRequest) - return err + if err != pg.ErrNoRows { + for _, cdPipeline := range cdPipelines.Pipelines { + cdPipelineDeleteRequest := &bean.CDPatchRequest{ + AppId: appId, + UserId: userId, + Action: bean.CD_DELETE, + ForceDelete: true, + NonCascadeDelete: false, + Pipeline: cdPipeline, + } + _, err = 
handler.pipelineBuilder.PatchCdPipelines(cdPipelineDeleteRequest, ctx) + if err != nil { + handler.logger.Errorw("err in deleting cd pipeline in DeleteApp", "err", err, "payload", cdPipelineDeleteRequest) + return err + } } + } // delete all CD pipelines for app ends // delete all CI pipelines for app starts ciPipelines, err := handler.pipelineBuilder.GetCiPipeline(appId) - if err != nil { + if err != nil && err != pg.ErrNoRows { handler.logger.Errorw("service err, GetCiPipelines in DeleteApp", "err", err, "appId", appId) return err } - for _, ciPipeline := range ciPipelines.CiPipelines { - ciPipelineDeleteRequest := &bean.CiPatchRequest{ - AppId: appId, - UserId: userId, - Action: bean.DELETE, - CiPipeline: ciPipeline, - } - _, err := handler.pipelineBuilder.PatchCiPipeline(ciPipelineDeleteRequest) - if err != nil { - handler.logger.Errorw("err in deleting ci pipeline in DeleteApp", "err", err, "payload", ciPipelineDeleteRequest) - return err + if err != pg.ErrNoRows { + + for _, ciPipeline := range ciPipelines.CiPipelines { + ciPipelineDeleteRequest := &bean.CiPatchRequest{ + AppId: appId, + UserId: userId, + Action: bean.DELETE, + CiPipeline: ciPipeline, + } + _, err := handler.pipelineBuilder.PatchCiPipeline(ciPipelineDeleteRequest) + if err != nil { + handler.logger.Errorw("err in deleting ci pipeline in DeleteApp", "err", err, "payload", ciPipelineDeleteRequest) + return err + } } } // delete all CI pipelines for app ends @@ -1551,16 +1556,37 @@ func (handler CoreAppRestHandlerImpl) createWorkflows(ctx context.Context, appId //Creating workflow ends //Creating CI pipeline starts - ciPipelineId, err := handler.createCiPipeline(appId, userId, workflowId, workflow.CiPipeline) + ciPipeline, err := handler.createCiPipeline(appId, userId, workflowId, workflow.CiPipeline) if err != nil { + err1 := handler.appWorkflowService.DeleteAppWorkflow(workflowId, userId) + if err1 != nil { + handler.logger.Errorw("service err, DeleteAppWorkflow ") + return err1, 
http.StatusInternalServerError + } handler.logger.Errorw("err in saving ci pipelines", err, "appId", appId) return err, http.StatusInternalServerError } //Creating CI pipeline ends //Creating CD pipeline starts - err = handler.createCdPipelines(ctx, appId, userId, workflowId, ciPipelineId, workflow.CdPipelines) + err = handler.createCdPipelines(ctx, appId, userId, workflowId, ciPipeline.Id, workflow.CdPipelines) if err != nil { + ciPipelineDeleteRequest := &bean.CiPatchRequest{ + AppId: appId, + UserId: userId, + Action: bean.DELETE, + CiPipeline: ciPipeline, + } + _, err1 := handler.pipelineBuilder.PatchCiPipeline(ciPipelineDeleteRequest) + if err1 != nil { + handler.logger.Errorw("err in deleting ci pipeline in DeleteApp", "err", err, "payload", ciPipelineDeleteRequest) + return err1, http.StatusInternalServerError + } + err1 = handler.appWorkflowService.DeleteAppWorkflow(workflowId, userId) + if err1 != nil { + handler.logger.Errorw("service err, DeleteAppWorkflow ") + return err1, http.StatusInternalServerError + } handler.logger.Errorw("err in saving cd pipelines", err, "appId", appId) return err, http.StatusInternalServerError } @@ -1590,13 +1616,13 @@ func (handler CoreAppRestHandlerImpl) createWorkflowInDb(workflowName string, ap return savedAppWf.Id, nil } -func (handler CoreAppRestHandlerImpl) createCiPipeline(appId int, userId int32, workflowId int, ciPipelineData *appBean.CiPipelineDetails) (int, error) { +func (handler CoreAppRestHandlerImpl) createCiPipeline(appId int, userId int32, workflowId int, ciPipelineData *appBean.CiPipelineDetails) (*bean.CiPipeline, error) { // if ci pipeline is of external type, then throw error as we are not supporting it as of now if ciPipelineData.ParentCiPipeline == 0 && ciPipelineData.ParentAppId == 0 && ciPipelineData.IsExternal { err := errors.New("external ci pipeline creation is not supported yet") handler.logger.Error("external ci pipeline creation is not supported yet") - return 0, err + return nil, err } // 
build ci pipeline materials starts @@ -1613,13 +1639,13 @@ func (handler CoreAppRestHandlerImpl) createCiPipeline(appId int, userId int32, } if err != nil { handler.logger.Errorw("service err, FindByAppIdAndCheckoutPath in CreateWorkflows", "err", err, "appId", appId) - return 0, err + return nil, err } if gitMaterial == nil { err = errors.New("gitMaterial is nil") handler.logger.Errorw("gitMaterial is nil", "checkoutPath", ciMaterial.CheckoutPath) - return 0, err + return nil, err } ciMaterialRequest := &bean.CiMaterial{ @@ -1664,10 +1690,10 @@ func (handler CoreAppRestHandlerImpl) createCiPipeline(appId int, userId int32, res, err := handler.pipelineBuilder.PatchCiPipeline(ciPipelineRequest) if err != nil { handler.logger.Errorw("service err, PatchCiPipelines", "err", err, "appId", appId) - return 0, err + return nil, err } - return res.CiPipelines[0].Id, nil + return res.CiPipelines[0], nil } func (handler CoreAppRestHandlerImpl) createCdPipelines(ctx context.Context, appId int, userId int32, workflowId int, ciPipelineId int, cdPipelines []*appBean.CdPipelineDetails) error { diff --git a/api/restHandler/PipelineHistoryRestHandler.go b/api/restHandler/PipelineHistoryRestHandler.go index 376e88067e..a0e3edcd37 100644 --- a/api/restHandler/PipelineHistoryRestHandler.go +++ b/api/restHandler/PipelineHistoryRestHandler.go @@ -6,6 +6,7 @@ import ( history2 "github.com/devtron-labs/devtron/pkg/pipeline/history" "github.com/devtron-labs/devtron/pkg/user" "github.com/devtron-labs/devtron/pkg/user/casbin" + "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/rbac" "github.com/gorilla/mux" "go.uber.org/zap" @@ -209,7 +210,11 @@ func (handler *PipelineHistoryRestHandlerImpl) FetchDeployedHistoryComponentDeta //RBAC END //checking if user has admin access userHasAdminAccess := handler.enforcer.Enforce(token, casbin.ResourceApplications, 
casbin.ActionUpdate, resourceName) - res, err := handler.deployedConfigurationHistoryService.GetDeployedHistoryComponentDetail(pipelineId, id, historyComponent, historyComponentName, userHasAdminAccess) + isSuperAdmin, _ := handler.userAuthService.IsSuperAdmin(int(userId)) + + ctx := r.Context() + ctx = util.SetSuperAdminInContext(ctx, isSuperAdmin) + res, err := handler.deployedConfigurationHistoryService.GetDeployedHistoryComponentDetail(ctx, pipelineId, id, historyComponent, historyComponentName, userHasAdminAccess) if err != nil { handler.logger.Errorw("service err, GetDeployedHistoryComponentDetail", "err", err, "pipelineId", pipelineId, "id", id) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) @@ -249,7 +254,10 @@ func (handler *PipelineHistoryRestHandlerImpl) GetAllDeployedConfigurationHistor //RBAC END //checking if user has admin access userHasAdminAccess := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionUpdate, resourceName) - res, err := handler.deployedConfigurationHistoryService.GetAllDeployedConfigurationByPipelineIdAndLatestWfrId(pipelineId, userHasAdminAccess) + isSuperAdmin, _ := handler.userAuthService.IsSuperAdmin(int(userId)) + ctx := r.Context() + ctx = util.SetSuperAdminInContext(ctx, isSuperAdmin) + res, err := handler.deployedConfigurationHistoryService.GetAllDeployedConfigurationByPipelineIdAndLatestWfrId(ctx, pipelineId, userHasAdminAccess) if err != nil { handler.logger.Errorw("service err, GetAllDeployedConfigurationByPipelineIdAndLatestWfrId", "err", err, "pipelineId", pipelineId) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) @@ -294,9 +302,13 @@ func (handler *PipelineHistoryRestHandlerImpl) GetAllDeployedConfigurationHistor return } //RBAC END + + isSuperAdmin, _ := handler.userAuthService.IsSuperAdmin(int(userId)) + ctx := r.Context() + ctx = util.SetSuperAdminInContext(ctx, isSuperAdmin) //checking if user has admin access userHasAdminAccess := 
handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionUpdate, resourceName) - res, err := handler.deployedConfigurationHistoryService.GetAllDeployedConfigurationByPipelineIdAndWfrId(pipelineId, wfrId, userHasAdminAccess) + res, err := handler.deployedConfigurationHistoryService.GetAllDeployedConfigurationByPipelineIdAndWfrId(ctx, pipelineId, wfrId, userHasAdminAccess) if err != nil { handler.logger.Errorw("service err, GetAllDeployedConfigurationByPipelineIdAndWfrId", "err", err, "pipelineId", pipelineId, "wfrId", wfrId) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) diff --git a/api/restHandler/PipelineTriggerRestHandler.go b/api/restHandler/PipelineTriggerRestHandler.go index f5344bff68..bd6235c478 100644 --- a/api/restHandler/PipelineTriggerRestHandler.go +++ b/api/restHandler/PipelineTriggerRestHandler.go @@ -21,6 +21,8 @@ import ( "context" "encoding/json" "fmt" + "github.com/devtron-labs/devtron/pkg/app" + "github.com/devtron-labs/devtron/util" "github.com/gorilla/mux" "go.opentelemetry.io/otel" @@ -29,7 +31,6 @@ import ( "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/api/restHandler/common" - "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/deploymentGroup" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/team" @@ -352,9 +353,12 @@ func (handler PipelineTriggerRestHandlerImpl) GetAllLatestDeploymentConfiguratio return } //RBAC END + isSuperAdmin, _ := handler.userAuthService.IsSuperAdmin(int(userId)) + ctx := r.Context() + ctx = util.SetSuperAdminInContext(ctx, isSuperAdmin) //checking if user has admin access userHasAdminAccess := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionUpdate, resourceName) - allDeploymentconfig, err := 
handler.deploymentConfigService.GetLatestDeploymentConfigurationByPipelineId(pipelineId, userHasAdminAccess) + allDeploymentconfig, err := handler.deploymentConfigService.GetLatestDeploymentConfigurationByPipelineId(ctx, pipelineId, userHasAdminAccess) if err != nil { handler.logger.Errorw("error in getting latest deployment config, GetAllDeployedConfigurationHistoryForSpecificWfrIdForPipeline", "err", err, "pipelineId", pipelineId) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) diff --git a/api/restHandler/app/BuildPipelineRestHandler.go b/api/restHandler/app/BuildPipelineRestHandler.go index 59deb01c5e..ea5a2342a7 100644 --- a/api/restHandler/app/BuildPipelineRestHandler.go +++ b/api/restHandler/app/BuildPipelineRestHandler.go @@ -13,7 +13,6 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/bean" "github.com/devtron-labs/devtron/pkg/pipeline" bean1 "github.com/devtron-labs/devtron/pkg/pipeline/bean" @@ -594,7 +593,7 @@ func (handler PipelineConfigRestHandlerImpl) TriggerCiPipeline(w http.ResponseWr //RBAC ENDS response := make(map[string]string) resp, err := handler.ciHandler.HandleCIManual(ciTriggerRequest) - if errors.Is(err, pkg.ErrImagePathInUse) { + if errors.Is(err, bean1.ErrImagePathInUse) { handler.Logger.Errorw("service err duplicate image tag, TriggerCiPipeline", "err", err, "payload", ciTriggerRequest) common.WriteJsonResp(w, err, response, http.StatusConflict) return diff --git a/api/restHandler/app/DeploymentPipelineRestHandler.go b/api/restHandler/app/DeploymentPipelineRestHandler.go index 1d614ff2a3..72d9f311a9 100644 --- a/api/restHandler/app/DeploymentPipelineRestHandler.go +++ 
b/api/restHandler/app/DeploymentPipelineRestHandler.go @@ -19,6 +19,7 @@ import ( resourceGroup2 "github.com/devtron-labs/devtron/pkg/resourceGroup" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/user/casbin" + util2 "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" "github.com/gorilla/mux" "go.opentelemetry.io/otel" @@ -928,10 +929,21 @@ func (handler PipelineConfigRestHandlerImpl) GetDeploymentTemplateData(w http.Re common.WriteJsonResp(w, err, "unauthorized user", http.StatusForbidden) return } + + userId, err := handler.userAuthService.GetLoggedInUser(r) + if userId == 0 || err != nil { + handler.Logger.Errorw("request err, userId", "err", err, "payload", userId) + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + isSuperAdmin, _ := handler.userAuthService.IsSuperAdmin(int(userId)) + //RBAC enforcer Ends ctx, cancel := context.WithTimeout(r.Context(), 60*time.Second) + ctx = util2.SetSuperAdminInContext(ctx, isSuperAdmin) defer cancel() + //TODO fix resp, err := handler.deploymentTemplateService.GetDeploymentTemplate(ctx, request) if err != nil { handler.Logger.Errorw("service err, GetEnvConfigOverride", "err", err, "payload", request) @@ -939,7 +951,6 @@ func (handler PipelineConfigRestHandlerImpl) GetDeploymentTemplateData(w http.Re return } common.WriteJsonResp(w, nil, resp, http.StatusOK) - } func (handler PipelineConfigRestHandlerImpl) GetDeploymentTemplate(w http.ResponseWriter, r *http.Request) { diff --git a/api/router/pubsub/ApplicationStatusHandler.go b/api/router/pubsub/ApplicationStatusHandler.go index cd27a60b6e..94378a3639 100644 --- a/api/router/pubsub/ApplicationStatusHandler.go +++ b/api/router/pubsub/ApplicationStatusHandler.go @@ -22,6 +22,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/devtron-labs/devtron/pkg/app" 
"time" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" @@ -32,7 +33,6 @@ import ( v1alpha12 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" pubsub "github.com/devtron-labs/common-lib/pubsub-lib" - "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/appStore/deployment/service" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/go-pg/pg" diff --git a/charts/devtron/Chart.yaml b/charts/devtron/Chart.yaml index 4dff6167c4..ef73f6ed80 100644 --- a/charts/devtron/Chart.yaml +++ b/charts/devtron/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: devtron-operator -appVersion: 0.6.22 +appVersion: 0.6.23 description: Chart to configure and install Devtron. Devtron is a Kubernetes Orchestration system. keywords: - Devtron @@ -11,7 +11,7 @@ keywords: - argocd - Hyperion engine: gotpl -version: 0.22.61 +version: 0.22.62 sources: - https://github.com/devtron-labs/charts dependencies: diff --git a/charts/devtron/devtron-bom.yaml b/charts/devtron/devtron-bom.yaml index d106022999..0af4a2b095 100644 --- a/charts/devtron/devtron-bom.yaml +++ b/charts/devtron/devtron-bom.yaml @@ -9,32 +9,38 @@ global: runAsNonRoot: true installer: - release: "v0.6.22" + release: "v0.6.23" image: "quay.io/devtron/inception" tag: "44b30917-185-13275" components: dashboard: - image: "quay.io/devtron/dashboard:12717798-325-16265" + image: "quay.io/devtron/dashboard:ba04f4f4-325-18824" config: extraConfigs: USE_V2: "true" ENABLE_BUILD_CONTEXT: "true" ENABLE_RESTART_WORKLOAD: "true" HIDE_EXCLUDE_INCLUDE_GIT_COMMITS: "false" + ENABLE_SCOPED_VARIABLES: "true" + ENABLE_CI_JOB: "true" devtron: - image: "quay.io/devtron/hyperion:3c1ba1ad-280-16262" - cicdImage: "quay.io/devtron/devtron:3c1ba1ad-434-16260" + image: "quay.io/devtron/hyperion:65577374-280-18804" + cicdImage: 
"quay.io/devtron/devtron:50ac85e6-434-18829" customOverrides: - DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:d8d774c3-138-16238" + DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:ad3af321-138-18662" argocdDexServer: image: "ghcr.io/dexidp/dex:v2.30.2" initContainer: authenticator: "quay.io/devtron/authenticator:e414faff-393-13273" kubelink: - image: "quay.io/devtron/kubelink:aefc1baf-318-16208" + image: "quay.io/devtron/kubelink:25052130-318-18795" configs: ENABLE_HELM_RELEASE_CACHE: "true" + MANIFEST_FETCH_BATCH_SIZE: "2" + NATS_MSG_PROCESSING_BATCH_SIZE: "1" + NATS_SERVER_HOST: nats://devtron-nats.devtroncd:4222 + RUN_HELM_INSTALL_IN_ASYNC_MODE: "true" PG_ADDR: postgresql-postgresql.devtroncd PG_DATABASE: orchestrator PG_LOG_QUERY: "true" @@ -47,7 +53,34 @@ components: image: "quay.io/devtron/postgres:11.9.0-debian-10-r26" armImage: "quay.io/devtron/postgres:11.9" gitsensor: - image: "quay.io/devtron/git-sensor:46b8f0f1-200-16195" + image: "quay.io/devtron/git-sensor:b6c3ea0e-200-16327" + imagePullPolicy: "IfNotPresent" + serviceMonitor: + enabled: false + persistence: + volumeSize: 2Gi + configs: + PG_ADDR: postgresql-postgresql.devtroncd + PG_USER: postgres + COMMIT_STATS_TIMEOUT_IN_SEC: "2" + ENABLE_FILE_STATS: "true" + dbconfig: + secretName: postgresql-postgresql + keyName: postgresql-password + lens: + image: "quay.io/devtron/lens:8803028b-333-16178" + imagePullPolicy: IfNotPresent + configs: + GIT_SENSOR_PROTOCOL: GRPC + GIT_SENSOR_URL: git-sensor-service.devtroncd:90 + NATS_SERVER_HOST: nats://devtron-nats.devtroncd:4222 + PG_ADDR: postgresql-postgresql.devtroncd + PG_PORT: "5432" + PG_USER: postgres + PG_DATABASE: lens + dbconfig: + secretName: postgresql-postgresql + keyName: postgresql-password migrator: image: "quay.io/devtron/migrator:v4.16.2" envVars: @@ -66,13 +99,14 @@ argo-cd: repository: quay.io/argoproj/argocd tag: "v2.5.2" imagePullPolicy: IfNotPresent - -lens: - image: "quay.io/devtron/lens:8803028b-333-16178" + +workflowController: + image: 
"quay.io/argoproj/workflow-controller:v3.4.3" + executorImage: "quay.io/argoproj/argoexec:v3.4.3" security: imageScanner: image: "quay.io/devtron/image-scanner:ea03b0af-334-15158" notifier: - image: "quay.io/devtron/notifier:d9c72180-372-14306" + image: "quay.io/devtron/notifier:d71bcbcd-372-18717" diff --git a/charts/devtron/templates/NOTES.txt b/charts/devtron/templates/NOTES.txt index ddf0fea2f9..0150709137 100644 --- a/charts/devtron/templates/NOTES.txt +++ b/charts/devtron/templates/NOTES.txt @@ -55,3 +55,5 @@ Please wait for ~1 minute before running any of the following commands. 2. "Applied" means installation is successful. {{- end }} + +Facing issues? Reach out to our team on Discord https://discord.devtron.ai for immediate assistance! diff --git a/charts/devtron/templates/gitsensor.yaml b/charts/devtron/templates/gitsensor.yaml new file mode 100644 index 0000000000..356dec9ee6 --- /dev/null +++ b/charts/devtron/templates/gitsensor.yaml @@ -0,0 +1,160 @@ +{{- if $.Values.installer.modules }} +{{- if has "cicd" $.Values.installer.modules }} +{{- with .Values.components.gitsensor }} +apiVersion: v1 +kind: Secret +metadata: + name: git-sensor-secret + labels: + app: git-sensor + release: devtron +{{- if .secrets }} +data: +{{- range $k, $v := .secrets }} + {{ $k }}: {{ $v | b64enc }} +{{- end }} +{{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: git-sensor-cm + labels: + app: git-sensor + release: devtron +{{- if .configs }} +data: +{{ toYaml .configs | indent 2 }} +{{- end }} + +--- +# Source: gitsensor/templates/generic.yaml +apiVersion: v1 +kind: Service +metadata: + name: git-sensor-service + labels: + app: git-sensor + release: devtron +spec: + ports: + - name: sensor + port: 80 + protocol: TCP + targetPort: 8080 + - name: grpc + port: 90 + protocol: TCP + targetPort: 8081 + selector: + app: git-sensor +--- +# Source: gitsensor/templates/generic.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: git-sensor + labels: + 
release: devtron + app: git-sensor +spec: + selector: + matchLabels: + app: git-sensor # has to match .spec.template.metadata.labels + serviceName: git-sensor + replicas: 1 # by default is 1 + template: + metadata: + labels: + app: git-sensor + spec: + terminationGracePeriodSeconds: 10 + securityContext: + runAsGroup: 1000 + runAsUser: 1000 + initContainers: + - command: + - /bin/sh + - -c + - mkdir -p /git-base/ssh-keys && chown -R devtron:devtron /git-base && chmod 777 /git-base/ssh-keys + image: {{ .image }} + imagePullPolicy: IfNotPresent + name: chown-git-base + resources: {} + securityContext: + runAsUser: 0 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /git-base/ + name: git-volume + containers: + - name: git-sensor + image: {{ .image }} + {{- if .imagePullPolicy }} + imagePullPolicy: {{ .imagePullPolicy }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + runAsUser: 1000 + runAsNonRoot: true + ports: + - containerPort: 8080 + name: sensor + - containerPort: 8081 + name: grpc + volumeMounts: + - name: git-volume + mountPath: /git-base/ + env: + - name: DEVTRON_APP_NAME + value: git-sensor + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if $.Values.components.gitsensor.dbconfig }} + - name: PG_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .dbconfig.secretName }} + key: {{ .dbconfig.keyName }} + {{- end }} + envFrom: + - secretRef: + name: git-sensor-secret + - configMapRef: + name: git-sensor-cm + {{- if .resources }} + resources: + {{- toYaml .resources | nindent 12 }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: git-volume + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .persistence.volumeSize }} +--- +{{- if .serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: git-sensor-sm + labels: + app: git-sensor + kind: Prometheus + release: devtron 
+spec: + endpoints: + - port: app + path: /metrics + selector: + matchLabels: + app: git-sensor +{{- end }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/devtron/templates/lens.yaml b/charts/devtron/templates/lens.yaml new file mode 100644 index 0000000000..76328fb7dc --- /dev/null +++ b/charts/devtron/templates/lens.yaml @@ -0,0 +1,115 @@ +{{- if $.Values.installer.modules }} +{{- if has "cicd" $.Values.installer.modules }} +{{- with .Values.components.lens }} +apiVersion: v1 +kind: Secret +metadata: + name: lens-secret + labels: + app: lens + release: devtron +{{- if .secrets }} +data: +{{- range $k, $v := .secrets }} + {{ $k }}: {{ $v | b64enc }} +{{- end }} +{{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: lens-cm + labels: + app: lens + release: devtron +{{- if .configs }} +data: +{{ toYaml .configs | indent 2 }} +{{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: lens-service + labels: + app: lens + release: devtron +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: app + protocol: TCP + name: app + selector: + app: lens +--- +# Source: lens/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lens + labels: + app: lens + release: devtron +spec: + selector: + matchLabels: + app: lens + release: devtron + replicas: 1 + minReadySeconds: 60 + template: + metadata: + labels: + app: lens + release: devtron + spec: + terminationGracePeriodSeconds: 30 + restartPolicy: Always + {{- if and $.Values.global $.Values.global.podSecurityContext }} + securityContext: +{{- toYaml $.Values.global.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - name: lens + image: {{ .image }} + {{- if .imagePullPolicy }} + imagePullPolicy: {{ .imagePullPolicy }} + {{- end }} + {{- if and $.Values.global $.Values.global.containerSecurityContext }} + securityContext: +{{- toYaml $.Values.global.containerSecurityContext | nindent 12 }} + {{- end }} + ports: + - name: 
app + containerPort: 8080 + protocol: TCP + env: + - name: DEVTRON_APP_NAME + value: lens + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .dbconfig }} + - name: PG_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .dbconfig.secretName }} + key: {{ .dbconfig.keyName }} + {{- end }} + envFrom: + - configMapRef: + name: lens-cm + - secretRef: + name: lens-secret + {{- if .resources }} + resources: + {{- toYaml .resources | nindent 12 }} + {{- end }} + volumeMounts: [] + revisionHistoryLimit: 3 +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/devtron/templates/migrator.yaml b/charts/devtron/templates/migrator.yaml index 6a9b014380..f663ee208e 100644 --- a/charts/devtron/templates/migrator.yaml +++ b/charts/devtron/templates/migrator.yaml @@ -270,7 +270,7 @@ spec: - /bin/sh - -c - cp -r sql /shared/ - image: {{ $.Values.lens.image }} + image: {{ $.Values.components.lens.image }} name: init-lens {{- if and $.Values.global $.Values.global.containerSecurityContext }} securityContext: diff --git a/charts/devtron/templates/workflow.yaml b/charts/devtron/templates/workflow.yaml index 1548e27533..aeb8b66196 100644 --- a/charts/devtron/templates/workflow.yaml +++ b/charts/devtron/templates/workflow.yaml @@ -35,6 +35,8 @@ kind: CustomResourceDefinition metadata: name: workflows.argoproj.io spec: + conversion: + strategy: None group: argoproj.io names: kind: Workflow @@ -67,12 +69,12 @@ spec: type: object spec: type: object - x-kubernetes-preserve-unknown-fields: true x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true status: type: object - x-kubernetes-preserve-unknown-fields: true x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true required: - metadata - spec @@ -86,6 +88,8 @@ kind: CustomResourceDefinition metadata: name: workflowtemplates.argoproj.io spec: + conversion: + strategy: None group: argoproj.io names: kind: WorkflowTemplate @@ -116,6 +120,799 @@ spec: 
served: true storage: true --- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workfloweventbindings.argoproj.io +spec: + conversion: + strategy: None + group: argoproj.io + names: + kind: WorkflowEventBinding + listKind: WorkflowEventBindingList + plural: workfloweventbindings + shortNames: + - wfeb + singular: workfloweventbinding + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowtasksets.argoproj.io +spec: + conversion: + strategy: None + group: argoproj.io + names: + kind: WorkflowTaskSet + listKind: WorkflowTaskSetList + plural: workflowtasksets + shortNames: + - wfts + singular: workflowtaskset + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowtaskresults.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTaskResult + listKind: WorkflowTaskResultList + plural: workflowtaskresults + singular: workflowtaskresult + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + message: + type: string + 
metadata: + type: object + outputs: + properties: + artifacts: + items: + properties: + archive: + properties: + none: + type: object + tar: + properties: + compressionLevel: + format: int32 + type: integer + type: object + zip: + type: object + type: object + archiveLogs: + type: boolean + artifactGC: + properties: + podMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + serviceAccountName: + type: string + strategy: + enum: + - "" + - OnWorkflowCompletion + - OnWorkflowDeletion + - Never + type: string + type: object + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + deleted: + type: boolean + from: + type: string + fromExpression: + type: string + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: 
boolean + required: + - key + type: object + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - repo + type: object + globalName: + type: string + hdfs: + properties: + addresses: + items: + type: string + type: array + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + krbConfigConfigMap: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + krbKeytabSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + clientKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + type: string + optional: + type: 
boolean + required: + - key + type: object + clientSecretSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + mode: + format: int32 + type: integer + name: + type: string + optional: + type: boolean + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + securityToken: + type: string + required: + - key + type: object + path: + type: string + raw: + properties: + data: + type: string + required: + - data + type: object + recurseMode: + type: boolean + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + bucket: + type: string + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: 
string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + useSDKCreds: + type: boolean + type: object + subPath: + type: string + required: + - name + type: object + type: array + exitCode: + type: string + parameters: + items: + properties: + default: + type: string + description: + type: string + enum: + items: + type: string + type: array + globalName: + type: string + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + default: + type: string + event: + type: string + expression: + type: string + jqFilter: + type: string + jsonPath: + type: string + parameter: + type: string + path: + type: string + supplied: + type: object + type: object + required: + - name + type: object + type: array + result: + type: string + type: object + phase: + type: string + progress: + type: string + required: + - metadata + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowartifactgctasks.argoproj.io +spec: + conversion: + strategy: None + group: argoproj.io + names: + kind: WorkflowArtifactGCTask + listKind: WorkflowArtifactGCTaskList + plural: workflowartifactgctasks + shortNames: + - wfat + singular: workflowartifactgctask + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + 
x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cronworkflows.argoproj.io +spec: + conversion: + strategy: None + group: argoproj.io + names: + kind: CronWorkflow + listKind: CronWorkflowList + plural: cronworkflows + shortNames: + - cwf + - cronwf + singular: cronworkflow + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + conversion: + strategy: None + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + listKind: ClusterWorkflowTemplateList + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + singular: clusterworkflowtemplate + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- apiVersion: v1 kind: ServiceAccount metadata: @@ -228,15 +1025,6 @@ rules: - update - patch - delete -- apiGroups: - - argoproj.io - resources: - - workflowtemplates - - workflowtemplates/finalizers - 
verbs: - - get - - list - - watch - apiGroups: - "" resources: @@ -249,8 +1037,10 @@ rules: - "" resources: - persistentvolumeclaims + - persistentvolumeclaims/finalizers verbs: - create + - update - delete - get - apiGroups: @@ -258,6 +1048,9 @@ rules: resources: - workflows - workflows/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowartifactgctasks verbs: - get - list @@ -265,15 +1058,27 @@ rules: - update - patch - delete + - create - apiGroups: - argoproj.io resources: - workflowtemplates - workflowtemplates/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - get - list - watch +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults + - workflowtaskresults/finalizers + verbs: + - list + - watch + - deletecollection - apiGroups: - "" resources: @@ -341,7 +1146,9 @@ data: parallelism: 50 artifactRepository: archiveLogs: false + {{- if not $.Values.workflowController.IMDSv2Enforced }} containerRuntimeExecutor: pns + {{- end }} executor: imagePullPolicy: Always kind: ConfigMap @@ -368,7 +1175,11 @@ spec: - --configmap - workflow-controller-configmap - --executor-image + {{- if $.Values.workflowController.IMDSv2Enforced }} - {{ $.Values.workflowController.executorImage }} + {{- else }} + - quay.io/argoproj/argoexec:v3.0.7 + {{- end }} command: - workflow-controller env: @@ -377,7 +1188,15 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.name + {{- if $.Values.workflowController.IMDSv2Enforced }} + - name: POD_NAMES + value: v1 + {{- end }} + {{- if $.Values.workflowController.IMDSv2Enforced }} image: {{ $.Values.workflowController.image }} + {{- else }} + image: quay.io/argoproj/workflow-controller:v3.0.7 + {{- end }} name: workflow-controller {{- if $.Values.workflowController.resources }} resources: @@ -385,4 +1204,4 @@ spec: {{- end }} serviceAccountName: argo {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/devtron/values.yaml 
b/charts/devtron/values.yaml index 6ff484957c..c9da3f93e6 100644 --- a/charts/devtron/values.yaml +++ b/charts/devtron/values.yaml @@ -10,7 +10,7 @@ global: installer: repo: "devtron-labs/devtron" # For Kubernetes version < 1.16, set release: legacy. You won't be able to upgrade Devtron unless you upgrade the K8s version to 1.16 or above. - release: "v0.6.22" #You can use a branch name or a release tag name as a release, for gitee as source only "main" is supported as of now + release: "v0.6.23" #You can use a branch name or a release tag name as a release, for gitee as source only "main" is supported as of now image: quay.io/devtron/inception tag: 44b30917-185-13275 source: "github" # Available options are github and gitee @@ -55,15 +55,17 @@ components: ENABLE_BUILD_CONTEXT: "true" ENABLE_RESTART_WORKLOAD: "true" HIDE_EXCLUDE_INCLUDE_GIT_COMMITS: "false" - image: "quay.io/devtron/dashboard:12717798-325-16265" + ENABLE_SCOPED_VARIABLES: "true" + ENABLE_CI_JOB: "true" + image: "quay.io/devtron/dashboard:ba04f4f4-325-18824" imagePullPolicy: IfNotPresent devtron: - image: "quay.io/devtron/hyperion:3c1ba1ad-280-16262" - cicdImage: "quay.io/devtron/devtron:3c1ba1ad-434-16260" + image: "quay.io/devtron/hyperion:65577374-280-18804" + cicdImage: "quay.io/devtron/devtron:50ac85e6-434-18829" imagePullPolicy: IfNotPresent customOverrides: - DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:d8d774c3-138-16238" + DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:ad3af321-138-18662" serviceMonitor: enabled: false service: @@ -93,10 +95,14 @@ components: authenticator: "quay.io/devtron/authenticator:e414faff-393-13273" kubelink: - image: "quay.io/devtron/kubelink:aefc1baf-318-16208" + image: "quay.io/devtron/kubelink:25052130-318-18795" imagePullPolicy: IfNotPresent configs: ENABLE_HELM_RELEASE_CACHE: "true" + MANIFEST_FETCH_BATCH_SIZE: "2" + NATS_MSG_PROCESSING_BATCH_SIZE: "1" + NATS_SERVER_HOST: nats://devtron-nats.devtroncd:4222 + RUN_HELM_INSTALL_IN_ASYNC_MODE: "true" PG_ADDR: 
postgresql-postgresql.devtroncd PG_DATABASE: orchestrator PG_LOG_QUERY: "true" @@ -113,7 +119,34 @@ components: persistence: volumeSize: "20Gi" gitsensor: - image: "quay.io/devtron/git-sensor:46b8f0f1-200-16195" + image: "quay.io/devtron/git-sensor:b6c3ea0e-200-16327" + imagePullPolicy: "IfNotPresent" + serviceMonitor: + enabled: false + persistence: + volumeSize: 2Gi + configs: + PG_ADDR: postgresql-postgresql.devtroncd + PG_USER: postgres + COMMIT_STATS_TIMEOUT_IN_SEC: "2" + ENABLE_FILE_STATS: "true" + dbconfig: + secretName: postgresql-postgresql + keyName: postgresql-password + lens: + image: "quay.io/devtron/lens:8803028b-333-16178" + imagePullPolicy: IfNotPresent + configs: + GIT_SENSOR_PROTOCOL: GRPC + GIT_SENSOR_URL: git-sensor-service.devtroncd:90 + NATS_SERVER_HOST: nats://devtron-nats.devtroncd:4222 + PG_ADDR: postgresql-postgresql.devtroncd + PG_PORT: "5432" + PG_USER: postgres + PG_DATABASE: lens + dbconfig: + secretName: postgresql-postgresql + keyName: postgresql-password migrator: image: "quay.io/devtron/migrator:v4.16.2" envVars: @@ -294,7 +327,7 @@ security: notifier: enabled: false imagePullPolicy: IfNotPresent - image: "quay.io/devtron/notifier:d9c72180-372-14306" + image: "quay.io/devtron/notifier:d71bcbcd-372-18717" configs: CD_ENVIRONMENT: PROD DB: orchestrator @@ -318,11 +351,11 @@ minio: storage: "50Gi" # Change below values for workflow controller workflowController: - image: "quay.io/argoproj/workflow-controller:v3.0.7" - executorImage: "quay.io/argoproj/argoexec:v3.0.7" + # Set this to true if you have IMDSv2 enforced or IMDSv1 and v2 on your AWS EKS cluster and false if you are using IMDSv1 with token hop limit set to 1 + IMDSv2Enforced: true + image: "quay.io/argoproj/workflow-controller:v3.4.3" + executorImage: "quay.io/argoproj/argoexec:v3.4.3" -lens: - image: "quay.io/devtron/lens:8803028b-333-16178" # Values for grafana integration monitoring: grafana: diff --git a/client/argocdServer/ArgoClientWrapperService.go 
b/client/argocdServer/ArgoClientWrapperService.go new file mode 100644 index 0000000000..93682ee20a --- /dev/null +++ b/client/argocdServer/ArgoClientWrapperService.go @@ -0,0 +1,39 @@ +package argocdServer + +import ( + "context" + application2 "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" + "github.com/devtron-labs/devtron/client/argocdServer/application" + "github.com/devtron-labs/devtron/client/argocdServer/bean" + "go.uber.org/zap" +) + +type ArgoClientWrapperService interface { + GetArgoAppWithNormalRefresh(context context.Context, argoAppName string) error +} + +type ArgoClientWrapperServiceImpl struct { + logger *zap.SugaredLogger + acdClient application.ServiceClient +} + +func NewArgoClientWrapperServiceImpl(logger *zap.SugaredLogger, + acdClient application.ServiceClient, +) *ArgoClientWrapperServiceImpl { + return &ArgoClientWrapperServiceImpl{ + logger: logger, + acdClient: acdClient, + } +} + +func (impl *ArgoClientWrapperServiceImpl) GetArgoAppWithNormalRefresh(context context.Context, argoAppName string) error { + refreshType := bean.RefreshTypeNormal + impl.logger.Debugw("trying to normal refresh application through get ", "argoAppName", argoAppName) + _, err := impl.acdClient.Get(context, &application2.ApplicationQuery{Name: &argoAppName, Refresh: &refreshType}) + if err != nil { + impl.logger.Errorw("cannot get application with refresh", "app", argoAppName) + return err + } + impl.logger.Debugw("done getting the application with refresh with no error", "argoAppName", argoAppName) + return nil +} diff --git a/client/argocdServer/Version.go b/client/argocdServer/Version.go index ac1c4f3828..6bea985554 100644 --- a/client/argocdServer/Version.go +++ b/client/argocdServer/Version.go @@ -20,6 +20,7 @@ package argocdServer import ( "context" "github.com/argoproj/argo-cd/v2/pkg/apiclient/version" + 
"github.com/devtron-labs/devtron/client/argocdServer/connection" "github.com/golang/protobuf/ptypes/empty" "go.uber.org/zap" ) @@ -31,10 +32,10 @@ type VersionService interface { type VersionServiceImpl struct { logger *zap.SugaredLogger - argoCDConnectionManager ArgoCDConnectionManager + argoCDConnectionManager connection.ArgoCDConnectionManager } -func NewVersionServiceImpl(logger *zap.SugaredLogger, argoCDConnectionManager ArgoCDConnectionManager) *VersionServiceImpl { +func NewVersionServiceImpl(logger *zap.SugaredLogger, argoCDConnectionManager connection.ArgoCDConnectionManager) *VersionServiceImpl { return &VersionServiceImpl{logger: logger, argoCDConnectionManager: argoCDConnectionManager} } diff --git a/client/argocdServer/application/Application.go b/client/argocdServer/application/Application.go index 5206cf5f77..d35fa8b0be 100644 --- a/client/argocdServer/application/Application.go +++ b/client/argocdServer/application/Application.go @@ -23,13 +23,13 @@ import ( "errors" "fmt" "github.com/devtron-labs/devtron/api/restHandler/bean" + "github.com/devtron-labs/devtron/client/argocdServer/connection" "strings" "time" "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" "github.com/argoproj/argo-cd/v2/reposerver/apiclient" - "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/util" "go.uber.org/zap" "google.golang.org/grpc" @@ -116,11 +116,11 @@ type Manifests struct { type ServiceClientImpl struct { logger *zap.SugaredLogger - argoCDConnectionManager argocdServer.ArgoCDConnectionManager + argoCDConnectionManager connection.ArgoCDConnectionManager } func NewApplicationClientImpl( - logger *zap.SugaredLogger, argoCDConnectionManager argocdServer.ArgoCDConnectionManager, + logger *zap.SugaredLogger, 
argoCDConnectionManager connection.ArgoCDConnectionManager, ) *ServiceClientImpl { return &ServiceClientImpl{ logger: logger, diff --git a/client/argocdServer/bean/bean.go b/client/argocdServer/bean/bean.go new file mode 100644 index 0000000000..1a75a5c204 --- /dev/null +++ b/client/argocdServer/bean/bean.go @@ -0,0 +1,3 @@ +package bean + +const RefreshTypeNormal = "normal" diff --git a/client/argocdServer/cluster/Cluster.go b/client/argocdServer/cluster/Cluster.go index 5658f4760c..b9e1f0fc5f 100644 --- a/client/argocdServer/cluster/Cluster.go +++ b/client/argocdServer/cluster/Cluster.go @@ -22,7 +22,7 @@ import ( "errors" "github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - "github.com/devtron-labs/devtron/client/argocdServer" + "github.com/devtron-labs/devtron/client/argocdServer/connection" "go.uber.org/zap" "time" ) @@ -44,10 +44,10 @@ type ServiceClient interface { type ServiceClientImpl struct { logger *zap.SugaredLogger - argoCdConnection argocdServer.ArgoCDConnectionManager + argoCdConnection connection.ArgoCDConnectionManager } -func NewServiceClientImpl(logger *zap.SugaredLogger, argoCdConnection argocdServer.ArgoCDConnectionManager) *ServiceClientImpl { +func NewServiceClientImpl(logger *zap.SugaredLogger, argoCdConnection connection.ArgoCDConnectionManager) *ServiceClientImpl { return &ServiceClientImpl{ logger: logger, argoCdConnection: argoCdConnection, diff --git a/client/argocdServer/Config.go b/client/argocdServer/connection/Config.go similarity index 97% rename from client/argocdServer/Config.go rename to client/argocdServer/connection/Config.go index d0f1333557..54805c1cbd 100644 --- a/client/argocdServer/Config.go +++ b/client/argocdServer/connection/Config.go @@ -15,7 +15,7 @@ * */ -package argocdServer +package connection import ( "github.com/caarlos0/env" diff --git 
a/client/argocdServer/Connection.go b/client/argocdServer/connection/Connection.go similarity index 99% rename from client/argocdServer/Connection.go rename to client/argocdServer/connection/Connection.go index c325268529..56e77c776f 100644 --- a/client/argocdServer/Connection.go +++ b/client/argocdServer/connection/Connection.go @@ -15,7 +15,7 @@ * */ -package argocdServer +package connection import ( "context" diff --git a/client/argocdServer/Tls.go b/client/argocdServer/connection/Tls.go similarity index 99% rename from client/argocdServer/Tls.go rename to client/argocdServer/connection/Tls.go index ab22560bba..a9d11826de 100644 --- a/client/argocdServer/Tls.go +++ b/client/argocdServer/connection/Tls.go @@ -15,7 +15,7 @@ * */ -package argocdServer +package connection import ( "crypto/ecdsa" diff --git a/client/argocdServer/Token.go b/client/argocdServer/connection/Token.go similarity index 98% rename from client/argocdServer/Token.go rename to client/argocdServer/connection/Token.go index fb194aa50e..437d3fd1d2 100644 --- a/client/argocdServer/Token.go +++ b/client/argocdServer/connection/Token.go @@ -15,7 +15,7 @@ * */ -package argocdServer +package connection import "context" diff --git a/client/argocdServer/proxy.go b/client/argocdServer/connection/proxy.go similarity index 99% rename from client/argocdServer/proxy.go rename to client/argocdServer/connection/proxy.go index 1e66d3dcef..54f44c5a51 100644 --- a/client/argocdServer/proxy.go +++ b/client/argocdServer/connection/proxy.go @@ -15,7 +15,7 @@ * */ -package argocdServer +package connection import ( "bytes" diff --git a/client/argocdServer/proxy_test.go b/client/argocdServer/connection/proxy_test.go similarity index 98% rename from client/argocdServer/proxy_test.go rename to client/argocdServer/connection/proxy_test.go index a1f4be9b7b..bc84026e13 100644 --- a/client/argocdServer/proxy_test.go +++ b/client/argocdServer/connection/proxy_test.go @@ -1,4 +1,4 @@ -package argocdServer +package connection 
import "testing" diff --git a/client/argocdServer/repository/Repository.go b/client/argocdServer/repository/Repository.go index 19930c6806..7b5013f3f3 100644 --- a/client/argocdServer/repository/Repository.go +++ b/client/argocdServer/repository/Repository.go @@ -23,8 +23,8 @@ import ( repository2 "github.com/argoproj/argo-cd/v2/pkg/apiclient/repository" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" "github.com/argoproj/argo-cd/v2/reposerver/apiclient" - "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/client/argocdServer/application" + "github.com/devtron-labs/devtron/client/argocdServer/connection" "go.uber.org/zap" ) @@ -45,10 +45,10 @@ type ServiceClient interface { type ServiceClientImpl struct { logger *zap.SugaredLogger - argoCDConnectionManager argocdServer.ArgoCDConnectionManager + argoCDConnectionManager connection.ArgoCDConnectionManager } -func NewServiceClientImpl(logger *zap.SugaredLogger, argoCDConnectionManager argocdServer.ArgoCDConnectionManager) *ServiceClientImpl { +func NewServiceClientImpl(logger *zap.SugaredLogger, argoCDConnectionManager connection.ArgoCDConnectionManager) *ServiceClientImpl { return &ServiceClientImpl{ logger: logger, argoCDConnectionManager: argoCDConnectionManager, diff --git a/client/argocdServer/session/Session.go b/client/argocdServer/session/Session.go index f4b041e0d7..e5de2470f5 100644 --- a/client/argocdServer/session/Session.go +++ b/client/argocdServer/session/Session.go @@ -20,7 +20,7 @@ package session import ( "context" "github.com/argoproj/argo-cd/v2/pkg/apiclient/session" - "github.com/devtron-labs/devtron/client/argocdServer" + "github.com/devtron-labs/devtron/client/argocdServer/connection" "time" ) @@ -32,7 +32,7 @@ type ServiceClientImpl struct { ssc session.SessionServiceClient } -func 
NewSessionServiceClient(argoCDConnectionManager argocdServer.ArgoCDConnectionManager) *ServiceClientImpl { +func NewSessionServiceClient(argoCDConnectionManager connection.ArgoCDConnectionManager) *ServiceClientImpl { // this function only called when gitops configured and user ask for creating acd token conn := argoCDConnectionManager.GetConnection("") ssc := session.NewSessionServiceClient(conn) diff --git a/cmd/external-app/wire.go b/cmd/external-app/wire.go index 38fd34b681..d4b68e7b92 100644 --- a/cmd/external-app/wire.go +++ b/cmd/external-app/wire.go @@ -40,6 +40,7 @@ import ( "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/app" appStoreBean "github.com/devtron-labs/devtron/pkg/appStore/bean" + repository3 "github.com/devtron-labs/devtron/pkg/appStore/deployment/repository" appStoreDeploymentTool "github.com/devtron-labs/devtron/pkg/appStore/deployment/tool" appStoreDeploymentGitopsTool "github.com/devtron-labs/devtron/pkg/appStore/deployment/tool/gitops" "github.com/devtron-labs/devtron/pkg/attributes" @@ -199,6 +200,12 @@ func InitializeApp() (*App, error) { wire.Bind(new(dockerRegistryRepository.DockerRegistryIpsConfigRepository), new(*dockerRegistryRepository.DockerRegistryIpsConfigRepositoryImpl)), dockerRegistryRepository.NewOCIRegistryConfigRepositoryImpl, wire.Bind(new(dockerRegistryRepository.OCIRegistryConfigRepository), new(*dockerRegistryRepository.OCIRegistryConfigRepositoryImpl)), + + // chart group repository layer wire injection started + repository3.NewChartGroupDeploymentRepositoryImpl, + wire.Bind(new(repository3.ChartGroupDeploymentRepository), new(*repository3.ChartGroupDeploymentRepositoryImpl)), + // chart group repository layer wire injection ended + // end: docker registry wire set injection ) return &App{}, nil diff --git a/cmd/external-app/wire_gen.go 
b/cmd/external-app/wire_gen.go index 959fbf6239..b48223587e 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -274,6 +274,7 @@ func InitializeApp() (*App, error) { appStoreValuesServiceImpl := service2.NewAppStoreValuesServiceImpl(sugaredLogger, appStoreApplicationVersionRepositoryImpl, installedAppRepositoryImpl, appStoreVersionValuesRepositoryImpl, userServiceImpl) appStoreValuesRestHandlerImpl := appStoreValues.NewAppStoreValuesRestHandlerImpl(sugaredLogger, userServiceImpl, appStoreValuesServiceImpl) appStoreValuesRouterImpl := appStoreValues.NewAppStoreValuesRouterImpl(appStoreValuesRestHandlerImpl) + chartGroupDeploymentRepositoryImpl := repository4.NewChartGroupDeploymentRepositoryImpl(db, sugaredLogger) clusterInstalledAppsRepositoryImpl := repository4.NewClusterInstalledAppsRepositoryImpl(db, sugaredLogger) appStoreDeploymentHelmServiceImpl := appStoreDeploymentTool.NewAppStoreDeploymentHelmServiceImpl(sugaredLogger, helmAppServiceImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, helmAppClientImpl, installedAppRepositoryImpl, appStoreDeploymentCommonServiceImpl, ociRegistryConfigRepositoryImpl) installedAppVersionHistoryRepositoryImpl := repository4.NewInstalledAppVersionHistoryRepositoryImpl(sugaredLogger, db) @@ -282,7 +283,7 @@ func InitializeApp() (*App, error) { return nil, err } pubSubClientServiceImpl := pubsub_lib.NewPubSubClientServiceImpl(sugaredLogger) - appStoreDeploymentServiceImpl := service3.NewAppStoreDeploymentServiceImpl(sugaredLogger, installedAppRepositoryImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, clusterInstalledAppsRepositoryImpl, appRepositoryImpl, appStoreDeploymentHelmServiceImpl, appStoreDeploymentHelmServiceImpl, environmentServiceImpl, clusterServiceImpl, helmAppServiceImpl, appStoreDeploymentCommonServiceImpl, globalEnvVariables, installedAppVersionHistoryRepositoryImpl, gitOpsConfigRepositoryImpl, attributesServiceImpl, 
deploymentServiceTypeConfig, chartTemplateServiceImpl, pubSubClientServiceImpl) + appStoreDeploymentServiceImpl := service3.NewAppStoreDeploymentServiceImpl(sugaredLogger, installedAppRepositoryImpl, chartGroupDeploymentRepositoryImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, clusterInstalledAppsRepositoryImpl, appRepositoryImpl, appStoreDeploymentHelmServiceImpl, appStoreDeploymentHelmServiceImpl, environmentServiceImpl, clusterServiceImpl, helmAppServiceImpl, appStoreDeploymentCommonServiceImpl, globalEnvVariables, installedAppVersionHistoryRepositoryImpl, gitOpsConfigRepositoryImpl, attributesServiceImpl, deploymentServiceTypeConfig, chartTemplateServiceImpl, pubSubClientServiceImpl) appStoreDeploymentRestHandlerImpl := appStoreDeployment.NewAppStoreDeploymentRestHandlerImpl(sugaredLogger, userServiceImpl, enforcerImpl, enforcerUtilImpl, enforcerUtilHelmImpl, appStoreDeploymentServiceImpl, validate, helmAppServiceImpl, appStoreDeploymentCommonServiceImpl, helmUserServiceImpl, attributesServiceImpl) appStoreDeploymentRouterImpl := appStoreDeployment.NewAppStoreDeploymentRouterImpl(appStoreDeploymentRestHandlerImpl) chartProviderServiceImpl := chartProvider.NewChartProviderServiceImpl(sugaredLogger, chartRepoRepositoryImpl, chartRepositoryServiceImpl, dockerArtifactStoreRepositoryImpl, ociRegistryConfigRepositoryImpl) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 8e3953c8e6..fb6df44e1e 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -114,7 +114,7 @@ * [Connect SpringBoot with Mysql Database](user-guide/use-cases/connect-springboot-with-mysql-database.md) * [Connect Expressjs With Mongodb Database](user-guide/use-cases/connect-expressjs-with-mongodb-database.md) * [Connect Django With Mysql Database](user-guide/use-cases/connect-django-with-mysql-database.md) + * [Pull Helm Charts from OCI Registry](user-guide/use-cases/oci-pull.md) * [Telemetry Overview](user-guide/telemetry.md) * [Devtron on 
Graviton](reference/graviton.md) -* [Release Notes](https://github.com/devtron-labs/devtron/releases) - +* [Release Notes](https://github.com/devtron-labs/devtron/releases) \ No newline at end of file diff --git a/docs/reference/glossary.md b/docs/reference/glossary.md index cd2c294503..16a82760a6 100644 --- a/docs/reference/glossary.md +++ b/docs/reference/glossary.md @@ -50,9 +50,11 @@ Kubernetes objects used to store configuration data as key-value pairs. They all You can use different ConfigMaps for respective environments too. [Read More...](https://docs.devtron.ai/usage/applications/creating-application/config-maps) -### Container Registry +### Container/OCI Registry -It is a collection of repositories that store container images. It allows developers to store, share, and manage images used to deploy containers. In Devtron, you can add a container registry by going to Global Configurations → Container / OCI Registry. Your CI images are pushed to the container registry you configure. [Read More...](https://docs.devtron.ai/global-configurations/docker-registries) +It is a collection of repositories that store container images. It allows developers to store, share, and manage images used to deploy containers. In Devtron, you can add a container registry by going to Global Configurations → Container / OCI Registry. Your CI images are pushed to the container registry you configure. [Read More...](https://docs.devtron.ai/global-configurations/container-registries). + +An OCI-compliant registry can also store artifacts (such as helm charts). Here, OCI stands for Open Container Initiative. It is an open industry standard for container formats and registries. 
### Cordoning diff --git a/docs/user-guide/creating-application/git-material.md b/docs/user-guide/creating-application/git-material.md index 14d5e43ec0..a322a15bf0 100644 --- a/docs/user-guide/creating-application/git-material.md +++ b/docs/user-guide/creating-application/git-material.md @@ -1,67 +1,144 @@ # Git Repository -**Please configure Global configurations > Git Accounts to configure Git Repository is using private repo** +## Introduction -Git Repository is used to pull your application source code during the CI step. Select `Git Repository` section of the `App Configuration`. Inside `Git Repository` when you click on `Add Git Repository` you will see three options as shown below: +During the [CI process](https://docs.devtron.ai/usage/applications/deploying-application/triggering-ci), the application source code is pulled from your [git repository](https://docs.devtron.ai/resources/glossary#repo). -1. Git Account -2. Git Repo URL -3. Checkout Path +Devtron also supports multiple Git repositories (be it from one Git account or multiple Git accounts) in a single deployment. -Devtron also supports multiple git repositories in a single deployment. We will discuss this in detail in the multi git option [below](#5-multi-git). +![Figure 1: Adding Git Repository](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/add-git-repo.jpg) -![](../../.gitbook/assets/create-app-git-account.gif) +Therefore, this doc is divided into 2 sections, read the one that caters to your application: +* [Single Repo Application](#single-repo-application) +* [Multi Repo Application](#multi-repo-application) -## 1. Git Account +--- -In this section, you have to select the git account of your code repository. If the authentication type of the Git account is anonymous, only public git repository will be accessible. 
If you are using a private git repository, you can configure your git provider via [git accounts](../global-configurations/git-accounts.md). +## Single Repo Application -## 2. Git Repo URL +Follow the below steps if the source code of your application is hosted on a single Git repository. -Inside the git repo URL, you have to provide your code repository’s URL. For Example- [https://github.com/devtron-labs/django-repo](https://github.com/devtron-labs/django-repo) +In your application, go to **App Configuration** → **Git Repository**. You will get the following fields and options: -You can find this URL by clicking on the '⤓ code' button on your git repository page. +1. [Git Account](#git-account) +2. [Git Repo URL](#git-repo-url) +3. (Checkboxes) + * [Exclude specific file/folder in this repo](#exclude-specific-filefolder-in-this-repo) + * [Set clone directory](#set-clone-directory) + * [Pull submodules recursively](#pull-submodules-recursively) -Note: -* Copy the HTTPS/SSH url of the repository -* Please make sure that you've added your [dockerfile](https://docs.docker.com/engine/reference/builder/) in the repo. +### Git Account -![](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/git-repo-1.jpg) +This is a dropdown that shows the list of Git accounts added to your organization on Devtron. If you haven't done already, we recommend you to first [add your Git account](https://docs.devtron.ai/global-configurations/git-accounts) (especially when the repository is private). +![Figure 2: Selecting Git Account](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/select-git-account.jpg) -## 3. Checkout Path +{% hint style="info" %} +If the authentication type of your Git account is anonymous, only public Git repositories in that account will be accessible. 
Whereas, adding a user auth or SSH key will make both public and private repositories accessible. +{% endhint %} -After clicking on checkbox, git checkout path field appears. The git checkout path is the directory where your code is pulled or cloned for the repository you specified in the previous step. -This field is optional in case of a single git repository application and you can leave the path as default. Devtron assigns a directory by itself when the field is left blank. The default value of this field is `./` +### Git Repo URL -![](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/save-git-repo.jpg) +In this field, you have to provide your code repository’s URL, for e.g., `https://github.com/devtron-labs/django-repo`. -If you want to go with a multi-git approach, then you need to specify a separate path for each of your repositories. The first repository can be checked out at the default `./` path as explained above. But, for all the rest of the repositories, you need to ensure that you provide unique checkout paths. In failing to do so, you may cause Devtron to checkout multiple repositories in one directory and overwriting files from different repositories on each other. +You can find this URL by clicking on the **Code** button available on your repository page as shown below: -## 4. Pull Modules Recursively: +![Figure 3: Getting Repo URL](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/repo-url.jpg) -This checkbox is optional and is used for pulling [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) present in a repo. The submodules will be pulled recursively and same auth method which is used for parent repo will be used for submodules. 
+{% hint style="info" %} +* Copy the HTTPS/SSH portion of the URL too +* Make sure you've added your [Dockerfile](https://docs.docker.com/engine/reference/builder/) in the repo +{% endhint %} -## 5. Multi Git: -As we discussed, Devtron also supports multiple git repositories in a single application. To add multiple repositories, click on add repo and repeat steps 1 to 3. Repeat the process for every new git repository you add. Ensure that the checkout paths are unique for each. +### Exclude specific file/folder in this repo -Note: Even if you add multiple repositories, only one image will be created based on the docker file as shown in the [docker build config](docker-build-configuration.md). +Not all repository changes are worth triggering a new [CI build](https://docs.devtron.ai/usage/applications/deploying-application/triggering-ci). If you enable this checkbox, you can define the file(s) or folder(s) whose commits you wish to use in the CI build. -## **Why do we need Multi Git support-** +![Figure 4: Sample Exclusion Rule](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/sample1.jpg) -Let’s look at this with an example: +In other words, if a given commit contains changes only in file(s) present in your exclusion rule, the commit won't show up while selecting the [Git material](https://docs.devtron.ai/resources/glossary#material), which means it will not be eligible for build. However, if a given commit contains changes in other files too (along with the excluded file), the commit won't be excluded and it will definitely show up in the list of commits. + +![Figure 5: Excludes commits made to README.md](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/excluded-commit.jpg) + +Devtron allows you to create either an exclusion rule, an inclusion rule, or a combination of both. In case of multiple files or folders, you can list them in new lines. 
+ +To exclude a path, use **!** as the prefix, e.g. `!path/to/file`
+To include a path, don't use any prefix, e.g. `path/to/file` + + +#### Examples + + +| Sample Values | Description | +|---|---| +| `!README.md` | **Exclusion of a single file in root folder:**
Commits containing changes made only in README.md file will not be shown | +| `!README.md`
`!index.js` | **Exclusion of multiple files in root folder:**
Commits containing changes made only in README.md or/and index.js files will not be shown | +| `README.md` | **Inclusion of a single file in root folder:**
Commits containing changes made only in README.md file will be shown. Rest all will be excluded. | +| `!src/extensions/printer/code2.py` | **Exclusion of a single file in a folder tree:**
Commits containing changes made specifically to code2.py file will not be shown | +| `!src/*` | **Exclusion of a single folder and all its files:**
Commits containing changes made specifically to files within src folder will not be shown | +| `!README.md`
`index.js` | **Exclusion and inclusion of files:**
Commits containing changes made only in README.md will not be shown, but commits made in index.js file will be shown. All other commits apart from the aforementioned files will be excluded. | +| `!README.md`
`README.md` | **Exclusion and inclusion of conflicting files:**
If conflicting paths are defined in the rule, the one defined later will be considered. In this case, commits containing changes made only in README.md will be shown. | + + +You may use the **Learn how** link (as shown below) to understand the syntax of defining an exclusion or inclusion rule. + +![Figure 6: 'Learn how' Button](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/rules.jpg) + +Since file paths can be long, Devtron supports regex too for writing the paths. To understand it better, you may click the **How to use** link as shown below. + +![Figure 7: Regex Support](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/regex-help.jpg) + +#### How to view excluded commits? + +As we saw earlier in fig. 4 and 5, commits containing the changes of only `README.md` file were not displayed, since the file was in the exclusion list. + +However, Devtron gives you the option to view the excluded commits too. There's a döner menu at the top-right (beside the `Search by commit hash` search bar). + +![Figure 8a: Döner Menu Icon](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/doner-menu.jpg) + +![Figure 8b: Show Excluded Commits](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/show-exclusions.jpg) -Due to security reasons, you may want to keep sensitive configurations like third party API keys in a separate access restricted git repositories and the source code in a git repository that every developer has access to. To deploy this application, code from both the repositories is required. A multi-git support will help you to do that. 
+![Figure 8c: Commits Unavailable for Build](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/excluded-commits.jpg) -Few other examples, where you may want to have multiple repositories for your application and will need multi git checkout support: +The **EXCLUDED** label (in red) indicates that the commits contain changes made only to the excluded file, and hence they are unavailable for build. + + +### Set clone directory + +After clicking the checkbox, a field titled `clone directory path` appears. It is the directory where your code will be cloned for the repository you specified in the previous step. + +This field is optional for a single Git repository application and you can leave the path as default. Devtron assigns a directory by itself when the field is left blank. The default value of this field is `./` + +![Figure 8: Clone Directory Option](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/git-material/clone-directory.jpg) + + +### Pull submodules recursively + +This checkbox is optional and is used for pulling [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) present in a repo. The submodules will be pulled recursively, and the auth method used for the parent repo will be used for submodules too. + +--- + +## Multi Repo Application + +As discussed earlier, Devtron also supports multiple git repositories in a single application. To add multiple repositories, click **Add Git Repository** and repeat all the steps as mentioned in [Single Repo Application](#single-repo-application). However, ensure that the clone directory paths are unique for each repo. + +Repeat the process for every new git repository you add. The clone directory path is used by Devtron to assign a directory to each of your Git repositories. Devtron will clone your code at those locations and those paths can be referenced in the Docker file to create a Docker image of the application. 
+ +Whenever a change is pushed to any of the configured repositories, CI will be triggered and a new Docker image file will be built (based on the latest commits of the configured repositories). Next, the image will be pushed to the container registry you configured in Devtron. + +{% hint style="info" %} +Even if you add multiple repositories, only one image will be created based on the Dockerfile as shown in the [docker build config](docker-build-configuration.md) +{% endhint %} + +### Why do you need Multi-Git support? + +Let’s look at this with an example: -* To make code modularize, you are keeping front-end and back-end code in different repositories. -* Common Library extracted out in different repo so that it can be used via multiple other projects. -* Due to security reasons you are keeping configuration files in different access restricted git repositories. +Due to security reasons, you want to keep sensitive configurations like third-party API keys in separate access-restricted git repositories, and the source code in a Git repository that every developer has access to. To deploy this application, code from both the repositories are required. A Multi-Git support helps you achieve it. -## **How Devtron's 'Checkout Path' Works** +Other examples where you might need Multi-Git support: -The checkout path is used by Devtron to assign a directory to each of your git repositories. Once you provide different checkout paths for your repositories, Devtron will clone your code at those locations and these checkout paths can be referenced in the docker file to create docker image for the application. -Whenever a change is pushed to any the configured repositories, the CI will be triggered and a new docker image file will be built based on the latest commits of the configured repositories and pushed to the container registry. 
\ No newline at end of file +* To make code modularized, where front-end and back-end code are in different repos +* Common library extracted out in a different repo so that other projects can use it \ No newline at end of file diff --git a/docs/user-guide/deploying-application/triggering-ci.md b/docs/user-guide/deploying-application/triggering-ci.md index 3a0909fc8c..fe6ae8f267 100644 --- a/docs/user-guide/deploying-application/triggering-ci.md +++ b/docs/user-guide/deploying-application/triggering-ci.md @@ -10,6 +10,21 @@ Once clicked, a list will appear showing various commits made in the repository, CI Pipelines with automatic trigger enabled are triggered immediately when a new commit is made to the git branch. If the trigger for a build pipeline is set to manual, it will not be automatically triggered and requires a manual trigger. +{% hint style="info" %} + +### Partial Cloning Feature [![](https://img.shields.io/badge/ENT-Devtron-blue)](https://devtron.ai/pricing) + +CI builds can be time-consuming for large repositories, especially for enterprises. However, Devtron's partial cloning feature significantly increases cloning speed, reducing the time it takes to clone your source code and leading to faster build times. + +**Advantages** +* Smaller image sizes +* Reduced resource usage and costs +* Faster software releases +* Improved productivity + +Get in touch with us if you are looking for a way to improve the efficiency of your software development process +{% endhint %} + The **Refresh** icon updates the Git Commits section in the CI Pipeline by fetching the latest commits from the repository. Clicking on the refresh icon ensures that you have the most recent commit available. The **Ignore Cache** option ignores the previous build cache and creates a fresh build. If selected, will take a longer build time than usual. @@ -32,3 +47,5 @@ To check for any vulnerabilities in the build image, click on `Security`. 
Please ![](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/deploying-application/triggering-ci/security-scan-report.jpg) + + diff --git a/docs/user-guide/integrations/build-and-deploy-ci-cd.md b/docs/user-guide/integrations/build-and-deploy-ci-cd.md index c1d8977205..6347adf5a8 100644 --- a/docs/user-guide/integrations/build-and-deploy-ci-cd.md +++ b/docs/user-guide/integrations/build-and-deploy-ci-cd.md @@ -14,6 +14,10 @@ Devtron CI/CD integration enables software development teams to automate the bui * Provides deployment metrics like; deployment frequency, lead time, change failure rate, and mean-time recovery. * Seamless integration with Grafana for continuous application metrics like CPU and memory usage, status code, throughput, and latency on the dashboard. +{% hint style="info" %} +Devtron also gives you the option of partial cloning. It increases the cloning speed of your [code repository](../../../docs/reference/glossary#repo), thus reducing the [build time](../../../docs/reference/glossary#build-pipeline) during the [CI process](../deploying-application/triggering-ci). +{% endhint %} + ## Installation 1. On the **Devtron Stack Manager > Discover** page, click the **Build and Deploy (CI/CD)**. diff --git a/docs/user-guide/use-cases/oci-pull.md b/docs/user-guide/use-cases/oci-pull.md new file mode 100644 index 0000000000..858eabdaf7 --- /dev/null +++ b/docs/user-guide/use-cases/oci-pull.md @@ -0,0 +1,73 @@ +# Pull Helm Charts from OCI Registry + +## Introduction + +Devtron supports the installation of [Helm charts](https://docs.devtron.ai/resources/glossary#helm-charts-packages) from both: Helm [repos](https://docs.devtron.ai/resources/glossary#repo) and [Container/OCI registries](https://docs.devtron.ai/resources/glossary#container-registry). Unlike Helm repos, OCI registries do not have an index file to discover all the charts. 
However, Devtron makes it easier for you to populate your charts from multiple sources to the [chart store](https://docs.devtron.ai/resources/glossary#chart-store). + +**Pre-requisites** + +* Helm Chart(s) +* OCI-compliant Registry (e.g. Docker Hub and [many more](https://docs.devtron.ai/global-configurations/container-registries#supported-registry-providers)) + +You must [add your OCI registry](https://docs.devtron.ai/global-configurations/container-registries) to Devtron with the `Use as chart repository` option enabled. + +--- + +## Tutorial + +{% embed url="https://www.youtube.com/watch?v=9imC5MMz9gs" caption="Pulling Charts from an OCI Registry to Devtron" %} + +--- + +## Populating your Charts to the Chart Store + +1. Go to **Global Configurations** → **Container/OCI Registry**. + +2. Search your OCI registry in the list, and click it. + +3. In the **List of repositories**, add the chart repo(s). The format should be `username/chartname`. You can find the username from your registry provider account. + + ![Figure 1: Adding Chart Repos](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/use-cases/oci-pull/container-config.jpg) + +4. Click **Save** or **Update**. + +5. From the left sidebar, go to **Chart Store**. + +6. You can find your chart(s) either by using the search bar or by selecting your chart source. + + ![Figure 2: Searching your Chart](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/use-cases/oci-pull/chart-search.jpg) + +You have successfully pulled your charts to the chart store. + +![Figure 3: Uploaded Helm Charts](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/use-cases/oci-pull/chart-list.jpg) + +### Unable to find your Charts? 
+ +Deprecated charts won't show up in the Chart Store unless you enable the **Show deprecated charts** filter as shown below + +![Figure 4: Checking Deprecated Charts](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/use-cases/oci-pull/deprecated.jpg) + +Or, you may try performing a resync as shown below + +![Figure 5: Performing a Resync](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/use-cases/oci-pull/chart-sync.jpg) + +--- + + +## Removing your Chart from the Chart Store + +1. Go to your OCI registry settings in Devtron. + +2. In the **List of repositories** field, remove the unwanted chart repo. + + ![Figure 6: Removing a Chart Repo](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/use-cases/oci-pull/remove-chart-repo.jpg) + +3. Click **Update**. + +The removed chart would no longer appear in the Chart Store. + +{% hint style="info" %} +Deleting a chart repo from your OCI registry will not lead to the removal of chart from the Chart Store +{% endhint %} + + diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index 083be267d0..171cd872d4 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -73,7 +73,7 @@ type CiWorkflow struct { EnvironmentId int `sql:"environment_id"` ImagePathReservationId int `sql:"image_path_reservation_id"` ReferenceCiWorkflowId int `sql:"ref_ci_workflow_id"` - ParentCiWorkFlowId int `sql:"parent_ci_workflow_id"` + ParentCiWorkFlowId int `sql:"parent_ci_workflow_id"` CiPipeline *CiPipeline } diff --git a/internal/sql/repository/security/ImageScanDeployInfoRepository.go b/internal/sql/repository/security/ImageScanDeployInfoRepository.go index 6f421e774f..cea243d14b 100644 --- a/internal/sql/repository/security/ImageScanDeployInfoRepository.go +++ b/internal/sql/repository/security/ImageScanDeployInfoRepository.go @@ 
-145,13 +145,13 @@ func (impl ImageScanDeployInfoRepositoryImpl) FetchListingGroupByObject(size int } func (impl ImageScanDeployInfoRepositoryImpl) FetchByAppIdAndEnvId(appId int, envId int, objectType []string) (*ImageScanDeployInfo, error) { - var model ImageScanDeployInfo - err := impl.dbConnection.Model(&model). + var model *ImageScanDeployInfo + err := impl.dbConnection.Model(model). Where("scan_object_meta_id = ?", appId). Where("env_id = ?", envId).Where("object_type in (?)", pg.In(objectType)). Order("created_on desc").Limit(1). Select() - return &model, err + return model, err } func (impl ImageScanDeployInfoRepositoryImpl) FindByTypeMetaAndTypeId(scanObjectMetaId int, objectType string) (*ImageScanDeployInfo, error) { @@ -185,7 +185,8 @@ func (impl ImageScanDeployInfoRepositoryImpl) scanListQueryWithoutObject(request } query = query + " INNER JOIN environment env on env.id=info.env_id" query = query + " INNER JOIN cluster clus on clus.id=env.cluster_id" - query = query + " WHERE info.scan_object_meta_id > 0 and env.active=true and info.image_scan_execution_history_id[1] != -1" + query = query + " LEFT JOIN app ap on ap.id = info.scan_object_meta_id and info.object_type='app' WHERE ap.active=true" + query = query + " AND info.scan_object_meta_id > 0 and env.active=true and info.image_scan_execution_history_id[1] != -1 " if len(deployInfoIds) > 0 { ids := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(deployInfoIds)), ","), "[]") query = query + " AND info.id IN (" + ids + ")" diff --git a/internal/util/MergeUtil.go b/internal/util/MergeUtil.go index df6ca22fd4..3f252bb9d1 100644 --- a/internal/util/MergeUtil.go +++ b/internal/util/MergeUtil.go @@ -19,11 +19,11 @@ package util import ( "encoding/json" - "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/util" jsonpatch "github.com/evanphx/json-patch" "go.uber.org/zap" + "golang.org/x/exp/slices" ) type MergeUtil struct { @@ 
-99,8 +99,6 @@ func (m MergeUtil) ConfigMapMerge(appLevelConfigMapJson string, envLevelConfigMa appLevelConfigMap := bean.ConfigMapJson{} envLevelConfigMap := bean.ConfigMapJson{} configResponse := bean.ConfigMapJson{} - commonMaps := map[string]bean.ConfigSecretMap{} - var finalMaps []bean.ConfigSecretMap if appLevelConfigMapJson != "" { err = json.Unmarshal([]byte(appLevelConfigMapJson), &appLevelConfigMap) if err != nil { @@ -117,20 +115,7 @@ func (m MergeUtil) ConfigMapMerge(appLevelConfigMapJson string, envLevelConfigMa configResponse.Enabled = true } - for _, item := range envLevelConfigMap.Maps { - commonMaps[item.Name] = item - } - for _, item := range appLevelConfigMap.Maps { - if _, ok := commonMaps[item.Name]; ok { - //ignoring this value as override from configB - } else { - commonMaps[item.Name] = item - } - } - for _, v := range commonMaps { - finalMaps = append(finalMaps, v) - } - configResponse.Maps = finalMaps + configResponse.Maps = mergeConfigMapsAndSecrets(envLevelConfigMap.Maps, appLevelConfigMap.Maps) byteData, err := json.Marshal(configResponse) if err != nil { m.Logger.Debugw("error in marshal ", "err", err) @@ -140,67 +125,75 @@ func (m MergeUtil) ConfigMapMerge(appLevelConfigMapJson string, envLevelConfigMa func (m MergeUtil) ConfigSecretMerge(appLevelSecretJson string, envLevelSecretJson string, chartMajorVersion int, chartMinorVersion int, isJob bool) (data string, err error) { appLevelSecret := bean.ConfigSecretJson{} - envLevelSecret := bean.ConfigSecretJson{} - secretResponse := bean.ConfigSecretJson{} - commonSecrets := map[string]*bean.ConfigSecretMap{} - var finalMaps []*bean.ConfigSecretMap if appLevelSecretJson != "" { err = json.Unmarshal([]byte(appLevelSecretJson), &appLevelSecret) if err != nil { m.Logger.Debugw("error in Unmarshal ", "appLevelSecretJson", appLevelSecretJson, "envLevelSecretJson", envLevelSecretJson, "err", err) } } + envLevelSecret := bean.ConfigSecretJson{} if envLevelSecretJson != "" { err = 
json.Unmarshal([]byte(envLevelSecretJson), &envLevelSecret) if err != nil { m.Logger.Debugw("error in Unmarshal ", "appLevelSecretJson", appLevelSecretJson, "envLevelSecretJson", envLevelSecretJson, "err", err) } } + secretResponse := bean.ConfigSecretJson{} if len(appLevelSecret.Secrets) > 0 || len(envLevelSecret.Secrets) > 0 { secretResponse.Enabled = true } - for _, item := range envLevelSecret.Secrets { - commonSecrets[item.Name] = item + finalCMCS := mergeConfigMapsAndSecrets(envLevelSecret.GetDereferencedSecrets(), appLevelSecret.GetDereferencedSecrets()) + for _, finalMap := range finalCMCS { + finalMap = m.processExternalSecrets(finalMap, chartMajorVersion, chartMinorVersion, isJob) } - for _, item := range appLevelSecret.Secrets { + secretResponse.SetReferencedSecrets(finalCMCS) + byteData, err := json.Marshal(secretResponse) + if err != nil { + m.Logger.Debugw("error in marshal ", "err", err) + } + return string(byteData), err +} + +func mergeConfigMapsAndSecrets(envLevelCMCS []bean.ConfigSecretMap, appLevelSecretCMCS []bean.ConfigSecretMap) []bean.ConfigSecretMap { + envCMCSNames := make([]string, 0) + var finalCMCS []bean.ConfigSecretMap + for _, item := range envLevelCMCS { + envCMCSNames = append(envCMCSNames, item.Name) + } + for _, item := range appLevelSecretCMCS { //else ignoring this value as override from configB - if _, ok := commonSecrets[item.Name]; !ok { - commonSecrets[item.Name] = item + if !slices.Contains(envCMCSNames, item.Name) { + finalCMCS = append(finalCMCS, item) } } + for _, item := range envLevelCMCS { + finalCMCS = append(finalCMCS, item) + } + return finalCMCS +} - for _, item := range commonSecrets { - if item.ExternalType == util.AWSSecretsManager || item.ExternalType == util.AWSSystemManager || item.ExternalType == util.HashiCorpVault { - if item.SecretData != nil && ((chartMajorVersion <= 3 && chartMinorVersion < 8) || isJob) { - var es []map[string]interface{} - esNew := make(map[string]interface{}) - err = 
json.Unmarshal(item.SecretData, &es) - if err != nil { - m.Logger.Debugw("error in Unmarshal ", "appLevelSecretJson", appLevelSecretJson, "envLevelSecretJson", envLevelSecretJson, "err", err) - } - for _, item := range es { - keyProp := item["name"].(string) - valueProp := item["key"] - esNew[keyProp] = valueProp - } - byteData, err := json.Marshal(esNew) - if err != nil { - m.Logger.Debugw("error in marshal ", "err", err) - } - item.Data = byteData - item.SecretData = nil +func (m MergeUtil) processExternalSecrets(secret bean.ConfigSecretMap, chartMajorVersion int, chartMinorVersion int, isJob bool) bean.ConfigSecretMap { + if secret.ExternalType == util.AWSSecretsManager || secret.ExternalType == util.AWSSystemManager || secret.ExternalType == util.HashiCorpVault { + if secret.SecretData != nil && ((chartMajorVersion <= 3 && chartMinorVersion < 8) || isJob) { + var es []map[string]interface{} + esNew := make(map[string]interface{}) + err := json.Unmarshal(secret.SecretData, &es) + if err != nil { + m.Logger.Debugw("error in Unmarshal ", "SecretData", secret.SecretData, "external secret", es, "err", err) } + for _, item := range es { + keyProp := item["name"].(string) + valueProp := item["key"] + esNew[keyProp] = valueProp + } + byteData, err := json.Marshal(esNew) + if err != nil { + m.Logger.Debugw("error in marshal ", "err", err) + } + secret.Data = byteData + secret.SecretData = nil } } - - for _, v := range commonSecrets { - finalMaps = append(finalMaps, v) - } - secretResponse.Secrets = finalMaps - byteData, err := json.Marshal(secretResponse) - if err != nil { - m.Logger.Debugw("error in marshal ", "err", err) - } - return string(byteData), err + return secret } diff --git a/manifests/install/devtron-installer.yaml b/manifests/install/devtron-installer.yaml index a035d78329..101cec01c3 100644 --- a/manifests/install/devtron-installer.yaml +++ b/manifests/install/devtron-installer.yaml @@ -4,4 +4,4 @@ metadata: name: installer-devtron namespace: devtroncd 
spec: - url: https://raw.githubusercontent.com/devtron-labs/devtron/v0.6.22/manifests/installation-script + url: https://raw.githubusercontent.com/devtron-labs/devtron/v0.6.23/manifests/installation-script diff --git a/manifests/installation-script b/manifests/installation-script index 3d9964e8f3..1c30ab92be 100644 --- a/manifests/installation-script +++ b/manifests/installation-script @@ -1,4 +1,4 @@ -LTAG="v0.6.22"; +LTAG="v0.6.23"; REPO_RAW_URL="https://raw.githubusercontent.com/devtron-labs/devtron/"; operatorSecret = kubectl get secret -n devtroncd devtron-operator-secret; @@ -60,11 +60,7 @@ if !defaultCacheBucket { ######Generating raw urls argocdResource_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/argocd-resource.json"; devtronHousekeeping_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/devtron-housekeeping.yaml"; -dashboard_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/dashboard.yaml"; -gitSensor_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/gitsensor.yaml"; -kubelink_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/kubelink.yaml"; kubewatch_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/kubewatch.yaml"; -lens_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/lens.yaml"; natsServer_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/nats-server.yaml"; devtron_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/devtron.yaml"; devtronIngress_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/devtron-ingress.yaml"; @@ -74,49 +70,33 @@ devtronIngress_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/devtron-ingress-leg log(devtronIngress_raw); serviceAccount_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/serviceaccount.yaml"; namespace_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/namespace.yaml"; -rollout_raw = REPO_RAW_URL + LTAG + "/manifests/yamls/rollout.yaml"; ######Downloading the manifests argocdResource = download(argocdResource_raw); devtronHousekeeping = download(devtronHousekeeping_raw); -dashboard = download(dashboard_raw); -gitSensor = download(gitSensor_raw); -kubelink = 
download(kubelink_raw); kubewatch = download(kubewatch_raw); -lens = download(lens_raw); natsServer = download(natsServer_raw); devtron = download(devtron_raw); devtronIngress = download(devtronIngress_raw); serviceAccount = download(serviceAccount_raw); namespace = download(namespace_raw); -rollout = download(rollout_raw); ######Downloading the manifests devtronHousekeepingOverride = kubectl get cm -n devtroncd devtron-housekeeping-override-cm; -dashboardOverride = kubectl get cm -n devtroncd dashboard-override-cm; -gitSensorOverride = kubectl get cm -n devtroncd git-sensor-override-cm; -kubelinkOverride = kubectl get cm -n devtroncd kubelink-override-cm; kubewatchOverride = kubectl get cm -n devtroncd kubewatch-override-cm; -lensOverride = kubectl get cm -n devtroncd lens-override-cm; natsServerOverride = kubectl get cm -n devtroncd nats-server-override-cm; devtronOverride = kubectl get cm -n devtroncd devtron-override-cm; devtronIngressOverride = kubectl get cm -n devtroncd devtron-ingress-override-cm; serviceAccountOverride = kubectl get cm -n devtroncd devtron-service-account-override-cm; namespaceOverride = kubectl get cm -n devtroncd namespace-override-cm; -rolloutOverride = kubectl get cm -n devtroncd rollout-override-cm; -dashboardOverride = jsonSelect(dashboardOverride, "data.override"); -gitSensorOverride = jsonSelect(gitSensorOverride, "data.override"); -kubelinkOverride = jsonSelect(kubelinkOverride, "data.override"); kubewatchOverride = jsonSelect(kubewatchOverride, "data.override"); -lensOverride = jsonSelect(lensOverride, "data.override"); natsServerOverride = jsonSelect(natsServerOverride, "data.override"); devtronOverride = jsonSelect(devtronOverride, "data.override"); devtronIngressOverride = jsonSelect(devtronIngressOverride, "data.override"); serviceAccountOverride = jsonSelect(serviceAccountOverride, "data.override"); namespaceOverride = jsonSelect(namespaceOverride, "data.override"); -rolloutOverride = jsonSelect(rolloutOverride, 
"data.override"); namespaces = kubectl apply namespace; log("created namespaces"); @@ -126,15 +106,6 @@ log("created service account"); pa = kubectl patch -n devtroncd cm/argocd-cm --type "application/json-patch+json" -p argocdResource; log("executed argocd setup command"); -#rollout -rollout = kubectl apply -n devtroncd rollout -u rolloutOverride; -log("executed rollout setup command"); - -#git-sensor -kubeYamlEdit(gitSensor, "data.PG_PASSWORD", postgresqlPassword, `/Secret//git-sensor-secret`); - -#lens -kubeYamlEdit(lens, "data.PG_PASSWORD", postgresqlPassword, `/Secret//lens-secret`); migDelete = kubectl delete -n devtroncd job devtron-housekeeping; if !migDelete { @@ -230,50 +201,8 @@ if !helmInstallation { devtron = kubectl apply -n devtroncd devtron -u devtronOverride; log("executed devtron setup"); -if !helmInstallation { - if devtronIngressAnnotations { - log("editing ingress"); - kubeYamlEdit(devtronIngress, "metadata.annotations", devtronIngressAnnotations, `extensions/Ingress//devtron-ingress`, "asObject"); - } - - if setupDevtronIngress { - log("fetch ingress"); - existingIngress = kubectl get -n devtroncd ing devtron-ingress; - } - - if existingIngress { - annotations = jsonSelect(existingIngress, "metadata.annotations"); - } - - if annotations { - kubeYamlEdit(devtronIngress, "metadata.annotations", annotations, `extensions/Ingress//devtron-ingress`, "asObject"); - } - - if setupDevtronIngress { - log("setup ingress"); - log(devtronIngress); - devtronIngress = kubectl apply -n devtroncd devtronIngress -u devtronIngressOverride; - } - - log("executed devtron ingress setup"); -} - -if !helmInstallation { - dashboard = kubectl apply -n devtroncd dashboard -u dashboardOverride; - log("executed dashboard setup"); -} -gitSensor = kubectl apply -n devtroncd gitSensor -u gitSensorOverride; -log("executed git sensor setup"); -##imageScanner = kubectl apply -n devtroncd imageScanner -u imageScannerOverride; -log("executed image scanner setup"); -if 
!helmInstallation { - kubelink = kubectl apply -n devtroncd kubelink -u kubelinkOverride; - log("executed kubelink setup"); -} kubewatch = kubectl apply -n devtroncd kubewatch -u kubewatchOverride; log("executed kubewatch setup"); -lens = kubectl apply -n devtroncd lens -u lensOverride; -log("executed lens setup"); ## Applying Housekeeping Job appHousekeeping = kubectl apply -n devtroncd devtronHousekeeping -u devtronHousekeepingOverride; diff --git a/manifests/release.txt b/manifests/release.txt index 5b8cfb00ed..9543b3f3f9 100644 --- a/manifests/release.txt +++ b/manifests/release.txt @@ -1 +1 @@ -stable -1 v0.6.22 +stable -1 v0.6.23 diff --git a/manifests/version.txt b/manifests/version.txt index 635026fb80..d44996fff6 100644 --- a/manifests/version.txt +++ b/manifests/version.txt @@ -1 +1 @@ -v0.6.22 +v0.6.23 diff --git a/manifests/yamls/dashboard.yaml b/manifests/yamls/dashboard.yaml index 585154c84f..4113536571 100644 --- a/manifests/yamls/dashboard.yaml +++ b/manifests/yamls/dashboard.yaml @@ -235,7 +235,7 @@ spec: - name: envoy-config-volume mountPath: /etc/envoy-config/ - name: dashboard - image: "quay.io/devtron/dashboard:12717798-325-16265" + image: "quay.io/devtron/dashboard:ba04f4f4-325-18824" imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false diff --git a/manifests/yamls/devtron.yaml b/manifests/yamls/devtron.yaml index dbb58c74e8..efc37b5e90 100644 --- a/manifests/yamls/devtron.yaml +++ b/manifests/yamls/devtron.yaml @@ -53,7 +53,7 @@ data: CD_NODE_TAINTS_VALUE: "ci" CD_ARTIFACT_LOCATION_FORMAT: "%d/%d.zip" DEFAULT_CD_NAMESPACE: "devtron-cd" - DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:d8d774c3-138-16238" + DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:ad3af321-138-18662" DEFAULT_CD_TIMEOUT: "3600" WF_CONTROLLER_INSTANCE_ID: "devtron-runner" CI_LOGS_KEY_PREFIX: "ci-artifacts" @@ -94,6 +94,13 @@ data: GIT_SENSOR_PROTOCOL: GRPC GIT_SENSOR_URL: git-sensor-service.devtroncd:90 ENABLE_BUILD_CONTEXT: "true" + 
CI_SUCCESS_AUTO_TRIGGER_BATCH_SIZE: "1" + SKIP_GITOPS_VALIDATION: "false" + SKIP_CREATING_ECR_REPO: "false" + SCOPED_VARIABLE_ENABLED: "true" + SCOPED_VARIABLE_HANDLE_PRIMITIVES: "true" + MAX_CI_WORKFLOW_RETRIES: "0" + MAX_CD_WORKFLOW_RUNNER_RETRIES: "0" --- apiVersion: v1 kind: ConfigMap @@ -162,7 +169,7 @@ spec: runAsUser: 1000 containers: - name: devtron - image: "quay.io/devtron/devtron:3c1ba1ad-434-16260" + image: "quay.io/devtron/devtron:50ac85e6-434-18829" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/gitsensor.yaml b/manifests/yamls/gitsensor.yaml index 69d499c96f..b274a4bb12 100644 --- a/manifests/yamls/gitsensor.yaml +++ b/manifests/yamls/gitsensor.yaml @@ -67,7 +67,7 @@ spec: - /bin/sh - -c - mkdir -p /git-base/ssh-keys && chown -R devtron:devtron /git-base && chmod 777 /git-base/ssh-keys - image: "quay.io/devtron/git-sensor:46b8f0f1-200-16195" + image: "quay.io/devtron/git-sensor:b6c3ea0e-200-16327" imagePullPolicy: IfNotPresent name: chown-git-base resources: {} @@ -80,7 +80,7 @@ spec: name: git-volume containers: - name: git-sensor - image: "quay.io/devtron/git-sensor:46b8f0f1-200-16195" + image: "quay.io/devtron/git-sensor:b6c3ea0e-200-16327" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/kubelink.yaml b/manifests/yamls/kubelink.yaml index 0db8070821..b8eb486fe1 100644 --- a/manifests/yamls/kubelink.yaml +++ b/manifests/yamls/kubelink.yaml @@ -25,7 +25,7 @@ spec: runAsUser: 1000 containers: - name: kubelink - image: "quay.io/devtron/kubelink:aefc1baf-318-16208" + image: "quay.io/devtron/kubelink:25052130-318-18795" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/kubewatch.yaml b/manifests/yamls/kubewatch.yaml index 96a2c34f0e..65db0c5d12 100644 --- a/manifests/yamls/kubewatch.yaml +++ b/manifests/yamls/kubewatch.yaml @@ -164,7 +164,7 @@ spec: runAsUser: 1000 containers: - name: kubewatch - image: 
"quay.io/devtron/kubewatch:49f906a5-419-14814" + image: "quay.io/devtron/kubewatch:79d44ddc-370-18559" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/migrator.yaml b/manifests/yamls/migrator.yaml index 009c28505a..c9971d3008 100644 --- a/manifests/yamls/migrator.yaml +++ b/manifests/yamls/migrator.yaml @@ -47,7 +47,7 @@ spec: - name: MIGRATE_TO_VERSION value: "0" - name: GIT_HASH - value: 3c1ba1ad06cf134743c08667e8589dbd2f97c57d + value: 50ac85e68d6e020797b0db342527c79a89c9c969 envFrom: - secretRef: name: postgresql-migrator @@ -96,7 +96,7 @@ spec: - name: MIGRATE_TO_VERSION value: "0" - name: GIT_HASH - value: 3c1ba1ad06cf134743c08667e8589dbd2f97c57d + value: 50ac85e68d6e020797b0db342527c79a89c9c969 - name: GIT_BRANCH value: main envFrom: @@ -148,7 +148,7 @@ spec: - name: GIT_BRANCH value: main - name: GIT_HASH - value: 46b8f0f18a3402234663ba963496e2b8ced271ae + value: b6c3ea0ef2d3dff004b572916ff804914b8d938a envFrom: - secretRef: name: postgresql-migrator diff --git a/manifests/yamls/notifier.yaml b/manifests/yamls/notifier.yaml index 8424138a09..5c7cd4f2a9 100644 --- a/manifests/yamls/notifier.yaml +++ b/manifests/yamls/notifier.yaml @@ -66,7 +66,7 @@ spec: restartPolicy: Always containers: - name: notifier - image: quay.io/devtron/notifier:d9c72180-372-14306 + image: quay.io/devtron/notifier:d71bcbcd-372-18717 imagePullPolicy: IfNotPresent ports: - name: app diff --git a/manifests/yamls/serviceaccount.yaml b/manifests/yamls/serviceaccount.yaml index 6b9bee776f..b29127e812 100644 --- a/manifests/yamls/serviceaccount.yaml +++ b/manifests/yamls/serviceaccount.yaml @@ -158,15 +158,6 @@ rules: - update - patch - delete -- apiGroups: - - argoproj.io - resources: - - workflowtemplates - - workflowtemplates/finalizers - verbs: - - get - - list - - watch - apiGroups: - "" resources: @@ -179,8 +170,10 @@ rules: - "" resources: - persistentvolumeclaims + - persistentvolumeclaims/finalizers verbs: - create + - update - delete 
- get - apiGroups: @@ -188,6 +181,9 @@ rules: resources: - workflows - workflows/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowartifactgctasks verbs: - get - list @@ -195,15 +191,27 @@ rules: - update - patch - delete + - create - apiGroups: - argoproj.io resources: - workflowtemplates - workflowtemplates/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - get - list - watch +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults + - workflowtaskresults/finalizers + verbs: + - list + - watch + - deletecollection - apiGroups: - "" resources: diff --git a/pkg/app/AppCrudOperationService.go b/pkg/app/AppCrudOperationService.go index f139b09b61..9a3bb90b3d 100644 --- a/pkg/app/AppCrudOperationService.go +++ b/pkg/app/AppCrudOperationService.go @@ -206,7 +206,7 @@ func (impl AppCrudOperationServiceImpl) UpdateLabelsInApp(request *bean.CreateAp appLabelMap[uniqueLabelExists] = appLabel } } - + appLabelDeleteMap := make(map[string]bool, 0) for _, label := range request.AppLabels { uniqueLabelRequest := fmt.Sprintf("%s:%s:%t", label.Key, label.Value, label.Propagate) if _, ok := appLabelMap[uniqueLabelRequest]; !ok { @@ -227,10 +227,13 @@ func (impl AppCrudOperationServiceImpl) UpdateLabelsInApp(request *bean.CreateAp return nil, err } } else { - // delete from map so that item remain live, all other item will be delete from this app - delete(appLabelMap, uniqueLabelRequest) + // storing this unique so that item remain live, all other item will be delete from this app + appLabelDeleteMap[uniqueLabelRequest] = true } } + for labelReq, _ := range appLabelDeleteMap { + delete(appLabelMap, labelReq) + } for _, appLabel := range appLabelMap { err = impl.appLabelRepository.Delete(appLabel, tx) if err != nil { diff --git a/pkg/app/AppService.go b/pkg/app/AppService.go index 05622bd4ee..3fc92f185b 100644 --- a/pkg/app/AppService.go +++ b/pkg/app/AppService.go @@ -20,15 +20,11 @@ package app import ( 
"context" "encoding/json" - error2 "errors" "fmt" "github.com/caarlos0/env" - pubsub "github.com/devtron-labs/common-lib/pubsub-lib" - k8s2 "github.com/devtron-labs/common-lib/utils/k8s" k8sCommonBean "github.com/devtron-labs/common-lib/utils/k8s/commonBean" "github.com/devtron-labs/common-lib/utils/k8s/health" client2 "github.com/devtron-labs/devtron/api/helm-app" - bean3 "github.com/devtron-labs/devtron/pkg/app/bean" status2 "github.com/devtron-labs/devtron/pkg/app/status" repository4 "github.com/devtron-labs/devtron/pkg/appStore/deployment/repository" "github.com/devtron-labs/devtron/pkg/appStore/deployment/service" @@ -38,18 +34,13 @@ import ( "github.com/devtron-labs/devtron/pkg/k8s" repository3 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" repository5 "github.com/devtron-labs/devtron/pkg/pipeline/repository" - "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/variables" "github.com/devtron-labs/devtron/pkg/variables/parsers" _ "github.com/devtron-labs/devtron/pkg/variables/repository" - repository6 "github.com/devtron-labs/devtron/pkg/variables/repository" "github.com/devtron-labs/devtron/util/argo" - "github.com/tidwall/gjson" - "github.com/tidwall/sjson" "go.opentelemetry.io/otel" "io/ioutil" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" chart2 "k8s.io/helm/pkg/proto/hapi/chart" "net/url" "os" @@ -70,13 +61,10 @@ import ( application2 "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - 
"github.com/aws/aws-sdk-go/service/autoscaling" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/client/argocdServer/application" client "github.com/devtron-labs/devtron/client/events" - "github.com/devtron-labs/devtron/internal/middleware" - "github.com/devtron-labs/devtron/internal/sql/models" "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" @@ -88,11 +76,7 @@ import ( util "github.com/devtron-labs/devtron/util/event" "github.com/devtron-labs/devtron/util/rbac" "github.com/go-pg/pg" - errors2 "github.com/juju/errors" - "github.com/pkg/errors" "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) type AppServiceConfig struct { @@ -180,27 +164,30 @@ type AppServiceImpl struct { scopedVariableService variables.ScopedVariableService variableEntityMappingService variables.VariableEntityMappingService variableTemplateParser parsers.VariableTemplateParser + argoClientWrapperService argocdServer.ArgoClientWrapperService } type AppService interface { - TriggerRelease(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context, triggeredAt time.Time, deployedBy int32) (releaseNo int, manifest []byte, err error) + //TriggerRelease(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context, triggeredAt time.Time, deployedBy int32) (releaseNo int, manifest []byte, err error) UpdateReleaseStatus(request *bean.ReleaseStatusUpdateRequest) (bool, error) UpdateDeploymentStatusAndCheckIsSucceeded(app *v1alpha1.Application, statusTime time.Time, isAppStore 
bool) (bool, *chartConfig.PipelineOverride, error) - TriggerCD(artifact *repository.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, triggeredAt time.Time) error + //TriggerCD(artifact *repository.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, triggeredAt time.Time) error GetConfigMapAndSecretJson(appId int, envId int, pipelineId int) ([]byte, error) UpdateCdWorkflowRunnerByACDObject(app *v1alpha1.Application, cdWfrId int, updateTimedOutStatus bool) error GetCmSecretNew(appId int, envId int, isJob bool) (*bean.ConfigMapJson, *bean.ConfigSecretJson, error) - MarkImageScanDeployed(appId int, envId int, imageDigest string, clusterId int, isScanEnabled bool) error + //MarkImageScanDeployed(appId int, envId int, imageDigest string, clusterId int, isScanEnabled bool) error UpdateDeploymentStatusForGitOpsPipelines(app *v1alpha1.Application, statusTime time.Time, isAppStore bool) (bool, bool, *chartConfig.PipelineOverride, error) WriteCDSuccessEvent(appId int, envId int, override *chartConfig.PipelineOverride) GetGitOpsRepoPrefix() string - GetValuesOverrideForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*ValuesOverrideResponse, error) - GetEnvOverrideByTriggerType(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*chartConfig.EnvConfigOverride, error) - GetAppMetricsByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (bool, error) - GetDeploymentStrategyByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (*chartConfig.PipelineStrategy, error) + //GetValuesOverrideForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*ValuesOverrideResponse, error) + //GetEnvOverrideByTriggerType(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*chartConfig.EnvConfigOverride, error) + 
//GetAppMetricsByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (bool, error) + //GetDeploymentStrategyByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (*chartConfig.PipelineStrategy, error) CreateGitopsRepo(app *app.App, userId int32) (gitopsRepoName string, chartGitAttr *ChartGitAttribute, err error) GetDeployedManifestByPipelineIdAndCDWorkflowId(appId int, envId int, cdWorkflowId int, ctx context.Context) ([]byte, error) - SetPipelineFieldsInOverrideRequest(overrideRequest *bean.ValuesOverrideRequest, pipeline *pipelineConfig.Pipeline) + //SetPipelineFieldsInOverrideRequest(overrideRequest *bean.ValuesOverrideRequest, pipeline *pipelineConfig.Pipeline) + + BuildChartAndGetPath(appName string, envOverride *chartConfig.EnvConfigOverride, ctx context.Context) (string, error) } func NewAppService( @@ -261,7 +248,9 @@ func NewAppService( variableSnapshotHistoryService variables.VariableSnapshotHistoryService, scopedVariableService variables.ScopedVariableService, variableEntityMappingService variables.VariableEntityMappingService, - variableTemplateParser parsers.VariableTemplateParser) *AppServiceImpl { + variableTemplateParser parsers.VariableTemplateParser, + argoClientWrapperService argocdServer.ArgoClientWrapperService, +) *AppServiceImpl { appServiceImpl := &AppServiceImpl{ environmentConfigRepository: environmentConfigRepository, mergeUtil: mergeUtil, @@ -326,6 +315,7 @@ func NewAppService( scopedVariableService: scopedVariableService, variableEntityMappingService: variableEntityMappingService, variableTemplateParser: variableTemplateParser, + argoClientWrapperService: argoClientWrapperService, } return appServiceImpl } @@ -335,66 +325,6 @@ const ( Failure = "FAILURE" ) -func (impl *AppServiceImpl) SetPipelineFieldsInOverrideRequest(overrideRequest *bean.ValuesOverrideRequest, pipeline *pipelineConfig.Pipeline) { - overrideRequest.PipelineId = pipeline.Id - overrideRequest.PipelineName = 
pipeline.Name - overrideRequest.EnvId = pipeline.EnvironmentId - overrideRequest.EnvName = pipeline.Environment.Name - overrideRequest.ClusterId = pipeline.Environment.ClusterId - overrideRequest.AppId = pipeline.AppId - overrideRequest.AppName = pipeline.App.AppName - overrideRequest.DeploymentAppType = pipeline.DeploymentAppType -} - -func (impl *AppServiceImpl) getValuesFileForEnv(environmentId int) string { - return fmt.Sprintf("_%d-values.yaml", environmentId) //-{envId}-values.yaml -} -func (impl *AppServiceImpl) createArgoApplicationIfRequired(appId int, envConfigOverride *chartConfig.EnvConfigOverride, pipeline *pipelineConfig.Pipeline, userId int32) (string, error) { - //repo has been registered while helm create - chart, err := impl.chartRepository.FindLatestChartForAppByAppId(appId) - if err != nil { - impl.logger.Errorw("no chart found ", "app", appId) - return "", err - } - envModel, err := impl.envRepository.FindById(envConfigOverride.TargetEnvironment) - if err != nil { - return "", err - } - argoAppName := pipeline.DeploymentAppName - if pipeline.DeploymentAppCreated { - return argoAppName, nil - } else { - //create - appNamespace := envConfigOverride.Namespace - if appNamespace == "" { - appNamespace = "default" - } - namespace := argocdServer.DevtronInstalationNs - appRequest := &argocdServer.AppTemplate{ - ApplicationName: argoAppName, - Namespace: namespace, - TargetNamespace: appNamespace, - TargetServer: envModel.Cluster.ServerUrl, - Project: "default", - ValuesFile: impl.getValuesFileForEnv(envModel.Id), - RepoPath: chart.ChartLocation, - RepoUrl: chart.GitRepoUrl, - } - - argoAppName, err := impl.ArgoK8sClient.CreateAcdApp(appRequest, envModel.Cluster) - if err != nil { - return "", err - } - //update cd pipeline to mark deployment app created - _, err = impl.updatePipeline(pipeline, userId) - if err != nil { - impl.logger.Errorw("error in update cd pipeline for deployment app created or not", "err", err) - return "", err - } - return 
argoAppName, nil - } -} - func (impl *AppServiceImpl) UpdateReleaseStatus(updateStatusRequest *bean.ReleaseStatusUpdateRequest) (bool, error) { count, err := impl.pipelineOverrideRepository.UpdateStatusByRequestIdentifier(updateStatusRequest.RequestId, updateStatusRequest.NewStatus) if err != nil { @@ -1013,84 +943,6 @@ type ValuesOverrideResponse struct { AppMetrics bool } -type EnvironmentOverride struct { - Enabled bool `json:"enabled"` - EnvValues []*KeyValue `json:"envValues"` -} - -type KeyValue struct { - Key string `json:"key"` - Value string `json:"value"` -} - -func (conf *EnvironmentOverride) appendEnvironmentVariable(key, value string) { - item := &KeyValue{Key: key, Value: value} - conf.EnvValues = append(conf.EnvValues, item) -} - -func (impl *AppServiceImpl) TriggerCD(artifact *repository.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, triggeredAt time.Time) error { - impl.logger.Debugw("automatic pipeline trigger attempt async", "artifactId", artifact.Id) - - return impl.triggerReleaseAsync(artifact, cdWorkflowId, wfrId, pipeline, triggeredAt) -} - -func (impl *AppServiceImpl) triggerReleaseAsync(artifact *repository.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, triggeredAt time.Time) error { - err := impl.validateAndTrigger(pipeline, artifact, cdWorkflowId, wfrId, triggeredAt) - if err != nil { - impl.logger.Errorw("error in trigger for pipeline", "pipelineId", strconv.Itoa(pipeline.Id)) - } - impl.logger.Debugw("trigger attempted for all pipeline ", "artifactId", artifact.Id) - return err -} - -func (impl *AppServiceImpl) validateAndTrigger(p *pipelineConfig.Pipeline, artifact *repository.CiArtifact, cdWorkflowId, wfrId int, triggeredAt time.Time) error { - object := impl.enforcerUtil.GetAppRBACNameByAppId(p.AppId) - envApp := strings.Split(object, "/") - if len(envApp) != 2 { - impl.logger.Error("invalid req, app and env not found from rbac") - return errors.New("invalid req, app and env not found 
from rbac") - } - err := impl.releasePipeline(p, artifact, cdWorkflowId, wfrId, triggeredAt) - return err -} - -func (impl *AppServiceImpl) releasePipeline(pipeline *pipelineConfig.Pipeline, artifact *repository.CiArtifact, cdWorkflowId, wfrId int, triggeredAt time.Time) error { - impl.logger.Debugw("triggering release for ", "cdPipelineId", pipeline.Id, "artifactId", artifact.Id) - - pipeline, err := impl.pipelineRepository.FindById(pipeline.Id) - if err != nil { - impl.logger.Errorw("error in fetching pipeline by pipelineId", "err", err) - return err - } - - request := &bean.ValuesOverrideRequest{ - PipelineId: pipeline.Id, - UserId: artifact.CreatedBy, - CiArtifactId: artifact.Id, - AppId: pipeline.AppId, - CdWorkflowId: cdWorkflowId, - ForceTrigger: true, - DeploymentWithConfig: bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED, - WfrId: wfrId, - } - impl.SetPipelineFieldsInOverrideRequest(request, pipeline) - - ctx, err := impl.buildACDContext() - if err != nil { - impl.logger.Errorw("error in creating acd synch context", "pipelineId", pipeline.Id, "artifactId", artifact.Id, "err", err) - return err - } - //setting deployedBy as 1(system user) since case of auto trigger - id, _, err := impl.TriggerRelease(request, ctx, triggeredAt, 1) - if err != nil { - impl.logger.Errorw("error in auto cd pipeline trigger", "pipelineId", pipeline.Id, "artifactId", artifact.Id, "err", err) - } else { - impl.logger.Infow("pipeline successfully triggered ", "cdPipelineId", pipeline.Id, "artifactId", artifact.Id, "releaseId", id) - } - return err - -} - func (impl *AppServiceImpl) buildACDContext() (acdContext context.Context, err error) { //this method should only call in case of argo-integration and gitops configured acdToken, err := impl.argoUserService.GetLatestDevtronArgoCdUserToken() @@ -1103,515 +955,6 @@ func (impl *AppServiceImpl) buildACDContext() (acdContext context.Context, err e return ctx, nil } -func (impl *AppServiceImpl) getDbMigrationOverride(overrideRequest 
*bean.ValuesOverrideRequest, artifact *repository.CiArtifact, isRollback bool) (overrideJson []byte, err error) { - if isRollback { - return nil, fmt.Errorf("rollback not supported ye") - } - notConfigured := false - config, err := impl.dbMigrationConfigRepository.FindByPipelineId(overrideRequest.PipelineId) - if err != nil && !IsErrNoRows(err) { - impl.logger.Errorw("error in fetching pipeline override config", "req", overrideRequest, "err", err) - return nil, err - } else if IsErrNoRows(err) { - notConfigured = true - } - envVal := &EnvironmentOverride{} - if notConfigured { - impl.logger.Warnw("no active db migration found", "pipeline", overrideRequest.PipelineId) - envVal.Enabled = false - } else { - materialInfos, err := artifact.ParseMaterialInfo() - if err != nil { - return nil, err - } - - hash, ok := materialInfos[config.GitMaterial.Url] - if !ok { - impl.logger.Errorf("wrong url map ", "map", materialInfos, "url", config.GitMaterial.Url) - return nil, fmt.Errorf("configured url not found in material %s", config.GitMaterial.Url) - } - - envVal.Enabled = true - if config.GitMaterial.GitProvider.AuthMode != repository.AUTH_MODE_USERNAME_PASSWORD && - config.GitMaterial.GitProvider.AuthMode != repository.AUTH_MODE_ACCESS_TOKEN && - config.GitMaterial.GitProvider.AuthMode != repository.AUTH_MODE_ANONYMOUS { - return nil, fmt.Errorf("auth mode %s not supported for migration", config.GitMaterial.GitProvider.AuthMode) - } - envVal.appendEnvironmentVariable("GIT_REPO_URL", config.GitMaterial.Url) - envVal.appendEnvironmentVariable("GIT_USER", config.GitMaterial.GitProvider.UserName) - var password string - if config.GitMaterial.GitProvider.AuthMode == repository.AUTH_MODE_USERNAME_PASSWORD { - password = config.GitMaterial.GitProvider.Password - } else { - password = config.GitMaterial.GitProvider.AccessToken - } - envVal.appendEnvironmentVariable("GIT_AUTH_TOKEN", password) - // parse git-tag not required - //envVal.appendEnvironmentVariable("GIT_TAG", "") - 
envVal.appendEnvironmentVariable("GIT_HASH", hash) - envVal.appendEnvironmentVariable("SCRIPT_LOCATION", config.ScriptSource) - envVal.appendEnvironmentVariable("DB_TYPE", string(config.DbConfig.Type)) - envVal.appendEnvironmentVariable("DB_USER_NAME", config.DbConfig.UserName) - envVal.appendEnvironmentVariable("DB_PASSWORD", config.DbConfig.Password) - envVal.appendEnvironmentVariable("DB_HOST", config.DbConfig.Host) - envVal.appendEnvironmentVariable("DB_PORT", config.DbConfig.Port) - envVal.appendEnvironmentVariable("DB_NAME", config.DbConfig.DbName) - //Will be used for rollback don't delete it - //envVal.appendEnvironmentVariable("MIGRATE_TO_VERSION", strconv.Itoa(overrideRequest.TargetDbVersion)) - } - dbMigrationConfig := map[string]interface{}{"dbMigrationConfig": envVal} - confByte, err := json.Marshal(dbMigrationConfig) - if err != nil { - return nil, err - } - return confByte, nil -} - -func (impl *AppServiceImpl) GetAppMetricsByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (bool, error) { - - var appMetrics bool - if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { - _, span := otel.Tracer("orchestrator").Start(ctx, "deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId") - deploymentTemplateHistory, err := impl.deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - span.End() - if err != nil { - impl.logger.Errorw("error in getting deployed deployment template history by pipelineId and wfrId", "err", err, "pipelineId", &overrideRequest, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - return appMetrics, err - } - appMetrics = deploymentTemplateHistory.IsAppMetricsEnabled - - } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { - _, span := otel.Tracer("orchestrator").Start(ctx, "appLevelMetricsRepository.FindByAppId") - 
appLevelMetrics, err := impl.appLevelMetricsRepository.FindByAppId(overrideRequest.AppId) - span.End() - if err != nil && !IsErrNoRows(err) { - impl.logger.Errorw("err", err) - return appMetrics, &ApiError{InternalMessage: "unable to fetch app level metrics flag"} - } - appMetrics = appLevelMetrics.AppMetrics - - _, span = otel.Tracer("orchestrator").Start(ctx, "envLevelMetricsRepository.FindByAppIdAndEnvId") - envLevelMetrics, err := impl.envLevelMetricsRepository.FindByAppIdAndEnvId(overrideRequest.AppId, overrideRequest.EnvId) - span.End() - if err != nil && !IsErrNoRows(err) { - impl.logger.Errorw("err", err) - return appMetrics, &ApiError{InternalMessage: "unable to fetch env level metrics flag"} - } - if envLevelMetrics.Id != 0 && envLevelMetrics.AppMetrics != nil { - appMetrics = *envLevelMetrics.AppMetrics - } - } - return appMetrics, nil -} - -func (impl *AppServiceImpl) GetDeploymentStrategyByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (*chartConfig.PipelineStrategy, error) { - - strategy := &chartConfig.PipelineStrategy{} - var err error - if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { - _, span := otel.Tracer("orchestrator").Start(ctx, "strategyHistoryRepository.GetHistoryByPipelineIdAndWfrId") - strategyHistory, err := impl.strategyHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - span.End() - if err != nil { - impl.logger.Errorw("error in getting deployed strategy history by pipleinId and wfrId", "err", err, "pipelineId", overrideRequest.PipelineId, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - return nil, err - } - strategy.Strategy = strategyHistory.Strategy - strategy.Config = strategyHistory.Config - strategy.PipelineId = overrideRequest.PipelineId - } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { - if 
overrideRequest.ForceTrigger { - _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.GetDefaultStrategyByPipelineId") - strategy, err = impl.pipelineConfigRepository.GetDefaultStrategyByPipelineId(overrideRequest.PipelineId) - span.End() - } else { - var deploymentTemplate chartRepoRepository.DeploymentStrategy - if overrideRequest.DeploymentTemplate == "ROLLING" { - deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_ROLLING - } else if overrideRequest.DeploymentTemplate == "BLUE-GREEN" { - deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_BLUE_GREEN - } else if overrideRequest.DeploymentTemplate == "CANARY" { - deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_CANARY - } else if overrideRequest.DeploymentTemplate == "RECREATE" { - deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_RECREATE - } - - if len(deploymentTemplate) > 0 { - _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.FindByStrategyAndPipelineId") - strategy, err = impl.pipelineConfigRepository.FindByStrategyAndPipelineId(deploymentTemplate, overrideRequest.PipelineId) - span.End() - } else { - _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.GetDefaultStrategyByPipelineId") - strategy, err = impl.pipelineConfigRepository.GetDefaultStrategyByPipelineId(overrideRequest.PipelineId) - span.End() - } - } - if err != nil && errors2.IsNotFound(err) == false { - impl.logger.Errorf("invalid state", "err", err, "req", strategy) - return nil, err - } - } - return strategy, nil -} - -func (impl *AppServiceImpl) GetEnvOverrideByTriggerType(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*chartConfig.EnvConfigOverride, error) { - - envOverride := &chartConfig.EnvConfigOverride{} - - var err error - if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { - _, span := otel.Tracer("orchestrator").Start(ctx, 
"deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId") - deploymentTemplateHistory, err := impl.deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - //VARIABLE_SNAPSHOT_GET and resolve - - span.End() - if err != nil { - impl.logger.Errorw("error in getting deployed deployment template history by pipelineId and wfrId", "err", err, "pipelineId", &overrideRequest, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - return nil, err - } - templateName := deploymentTemplateHistory.TemplateName - templateVersion := deploymentTemplateHistory.TemplateVersion - if templateName == "Rollout Deployment" { - templateName = "" - } - //getting chart_ref by id - _, span = otel.Tracer("orchestrator").Start(ctx, "chartRefRepository.FindByVersionAndName") - chartRef, err := impl.chartRefRepository.FindByVersionAndName(templateName, templateVersion) - span.End() - if err != nil { - impl.logger.Errorw("error in getting chartRef by version and name", "err", err, "version", templateVersion, "name", templateName) - return nil, err - } - //assuming that if a chartVersion is deployed then it's envConfigOverride will be available - _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.GetByAppIdEnvIdAndChartRefId") - envOverride, err = impl.environmentConfigRepository.GetByAppIdEnvIdAndChartRefId(overrideRequest.AppId, overrideRequest.EnvId, chartRef.Id) - span.End() - if err != nil { - impl.logger.Errorw("error in getting envConfigOverride for pipeline for specific chartVersion", "err", err, "appId", overrideRequest.AppId, "envId", overrideRequest.EnvId, "chartRefId", chartRef.Id) - return nil, err - } - - _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") - env, err := impl.envRepository.FindById(envOverride.TargetEnvironment) - span.End() - if err != nil { - impl.logger.Errorw("unable to find env", "err", err) - 
return nil, err - } - envOverride.Environment = env - - //updating historical data in envConfigOverride and appMetrics flag - envOverride.IsOverride = true - envOverride.EnvOverrideValues = deploymentTemplateHistory.Template - - resolvedTemplate, variableMap, err := impl.getResolvedTemplateWithSnapshot(deploymentTemplateHistory.Id, envOverride.EnvOverrideValues) - if err != nil { - return nil, err - } - envOverride.ResolvedEnvOverrideValues = resolvedTemplate - envOverride.VariableSnapshot = variableMap - - } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { - _, span := otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.ActiveEnvConfigOverride") - envOverride, err = impl.environmentConfigRepository.ActiveEnvConfigOverride(overrideRequest.AppId, overrideRequest.EnvId) - - var chart *chartRepoRepository.Chart - span.End() - if err != nil { - impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) - return nil, err - } - if envOverride.Id == 0 { - _, span = otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindLatestChartForAppByAppId") - chart, err = impl.chartRepository.FindLatestChartForAppByAppId(overrideRequest.AppId) - span.End() - if err != nil { - impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) - return nil, err - } - _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.FindChartByAppIdAndEnvIdAndChartRefId") - envOverride, err = impl.environmentConfigRepository.FindChartByAppIdAndEnvIdAndChartRefId(overrideRequest.AppId, overrideRequest.EnvId, chart.ChartRefId) - span.End() - if err != nil && !errors2.IsNotFound(err) { - impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) - return nil, err - } - - //creating new env override config - if errors2.IsNotFound(err) || envOverride == nil { - _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") - environment, err := 
impl.envRepository.FindById(overrideRequest.EnvId) - span.End() - if err != nil && !IsErrNoRows(err) { - return nil, err - } - envOverride = &chartConfig.EnvConfigOverride{ - Active: true, - ManualReviewed: true, - Status: models.CHARTSTATUS_SUCCESS, - TargetEnvironment: overrideRequest.EnvId, - ChartId: chart.Id, - AuditLog: sql.AuditLog{UpdatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId}, - Namespace: environment.Namespace, - IsOverride: false, - EnvOverrideValues: "{}", - Latest: false, - IsBasicViewLocked: chart.IsBasicViewLocked, - CurrentViewEditor: chart.CurrentViewEditor, - } - _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.Save") - err = impl.environmentConfigRepository.Save(envOverride) - span.End() - if err != nil { - impl.logger.Errorw("error in creating envconfig", "data", envOverride, "error", err) - return nil, err - } - } - envOverride.Chart = chart - } else if envOverride.Id > 0 && !envOverride.IsOverride { - _, span = otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindLatestChartForAppByAppId") - chart, err = impl.chartRepository.FindLatestChartForAppByAppId(overrideRequest.AppId) - span.End() - if err != nil { - impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) - return nil, err - } - envOverride.Chart = chart - } - - _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") - env, err := impl.envRepository.FindById(envOverride.TargetEnvironment) - span.End() - if err != nil { - impl.logger.Errorw("unable to find env", "err", err) - return nil, err - } - envOverride.Environment = env - - //VARIABLE different cases for variable resolution - scope := resourceQualifiers.Scope{ - AppId: overrideRequest.AppId, - EnvId: overrideRequest.EnvId, - ClusterId: overrideRequest.ClusterId, - SystemMetadata: &resourceQualifiers.SystemMetadata{ - EnvironmentName: env.Name, - ClusterName: env.Cluster.ClusterName, - 
Namespace: env.Namespace, - ImageTag: overrideRequest.ImageTag, - AppName: overrideRequest.AppName, - }, - } - - if envOverride.IsOverride { - - resolvedTemplate, variableMap, err := impl.extractVariablesAndResolveTemplate(scope, envOverride.EnvOverrideValues, repository6.Entity{ - EntityType: repository6.EntityTypeDeploymentTemplateEnvLevel, - EntityId: envOverride.Id, - }) - if err != nil { - return nil, err - } - envOverride.ResolvedEnvOverrideValues = resolvedTemplate - envOverride.VariableSnapshot = variableMap - } else { - resolvedTemplate, variableMap, err := impl.extractVariablesAndResolveTemplate(scope, chart.GlobalOverride, repository6.Entity{ - EntityType: repository6.EntityTypeDeploymentTemplateAppLevel, - EntityId: chart.Id, - }) - if err != nil { - return nil, err - } - envOverride.Chart.ResolvedGlobalOverride = resolvedTemplate - envOverride.VariableSnapshot = variableMap - } - } - - return envOverride, nil -} - -func (impl *AppServiceImpl) getResolvedTemplateWithSnapshot(deploymentTemplateHistoryId int, template string) (string, map[string]string, error) { - reference := repository6.HistoryReference{ - HistoryReferenceId: deploymentTemplateHistoryId, - HistoryReferenceType: repository6.HistoryReferenceTypeDeploymentTemplate, - } - variableSnapshot, err := impl.variableSnapshotHistoryService.GetVariableHistoryForReferences([]repository6.HistoryReference{reference}) - if err != nil { - return "", nil, err - } - - variableSnapshotMap := make(map[string]string) - - if _, ok := variableSnapshot[reference]; !ok { - return template, variableSnapshotMap, nil - } - - err = json.Unmarshal(variableSnapshot[reference].VariableSnapshot, &variableSnapshotMap) - if err != nil { - return "", nil, err - } - - if len(variableSnapshotMap) == 0 { - return template, variableSnapshotMap, nil - } - scopedVariableData := parsers.GetScopedVarData(variableSnapshotMap) - request := parsers.VariableParserRequest{Template: template, TemplateType: parsers.JsonVariableTemplate, 
Variables: scopedVariableData} - parserResponse := impl.variableTemplateParser.ParseTemplate(request) - err = parserResponse.Error - if err != nil { - return "", nil, err - } - resolvedTemplate := parserResponse.ResolvedTemplate - return resolvedTemplate, variableSnapshotMap, nil -} - -func (impl *AppServiceImpl) extractVariablesAndResolveTemplate(scope resourceQualifiers.Scope, template string, entity repository6.Entity) (string, map[string]string, error) { - - entityToVariables, err := impl.variableEntityMappingService.GetAllMappingsForEntities([]repository6.Entity{entity}) - if err != nil { - return "", nil, err - } - - variableMap := make(map[string]string) - if vars, ok := entityToVariables[entity]; !ok || len(vars) == 0 { - return template, variableMap, nil - } - scopedVariables, err := impl.scopedVariableService.GetScopedVariables(scope, entityToVariables[entity], true) - if err != nil { - return "", nil, err - } - - parserRequest := parsers.VariableParserRequest{Template: template, Variables: scopedVariables, TemplateType: parsers.JsonVariableTemplate} - parserResponse := impl.variableTemplateParser.ParseTemplate(parserRequest) - err = parserResponse.Error - if err != nil { - return "", nil, err - } - - for _, variable := range scopedVariables { - variableMap[variable.VariableName] = variable.VariableValue.StringValue() - } - - resolvedTemplate := parserResponse.ResolvedTemplate - return resolvedTemplate, variableMap, nil -} - -func (impl *AppServiceImpl) GetValuesOverrideForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*ValuesOverrideResponse, error) { - if overrideRequest.DeploymentType == models.DEPLOYMENTTYPE_UNKNOWN { - overrideRequest.DeploymentType = models.DEPLOYMENTTYPE_DEPLOY - } - if len(overrideRequest.DeploymentWithConfig) == 0 { - overrideRequest.DeploymentWithConfig = bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED - } - valuesOverrideResponse := &ValuesOverrideResponse{} - - pipeline, err := 
impl.pipelineRepository.FindById(overrideRequest.PipelineId) - if err != nil { - impl.logger.Errorw("error in fetching pipeline by pipeline id", "err", err, "pipeline-id-", overrideRequest.PipelineId) - return valuesOverrideResponse, err - } - - _, span := otel.Tracer("orchestrator").Start(ctx, "ciArtifactRepository.Get") - artifact, err := impl.ciArtifactRepository.Get(overrideRequest.CiArtifactId) - span.End() - if err != nil { - return valuesOverrideResponse, err - } - overrideRequest.ImageTag = artifact.Image - - envOverride, err := impl.GetEnvOverrideByTriggerType(overrideRequest, triggeredAt, ctx) - if err != nil { - impl.logger.Errorw("error in getting env override by trigger type", "err", err) - return valuesOverrideResponse, err - } - appMetrics, err := impl.GetAppMetricsByTriggerType(overrideRequest, ctx) - if err != nil { - impl.logger.Errorw("error in getting app metrics by trigger type", "err", err) - return valuesOverrideResponse, err - } - strategy, err := impl.GetDeploymentStrategyByTriggerType(overrideRequest, ctx) - if err != nil { - impl.logger.Errorw("error in getting strategy by trigger type", "err", err) - return valuesOverrideResponse, err - } - _, span = otel.Tracer("orchestrator").Start(ctx, "getDbMigrationOverride") - //FIXME: how to determine rollback - //we can't depend on ciArtifact ID because CI pipeline can be manually triggered in any order regardless of sourcecode status - dbMigrationOverride, err := impl.getDbMigrationOverride(overrideRequest, artifact, false) - span.End() - if err != nil { - impl.logger.Errorw("error in fetching db migration config", "req", overrideRequest, "err", err) - return valuesOverrideResponse, err - } - chartVersion := envOverride.Chart.ChartVersion - _, span = otel.Tracer("orchestrator").Start(ctx, "getConfigMapAndSecretJsonV2") - configMapJson, err := impl.getConfigMapAndSecretJsonV2(overrideRequest.AppId, envOverride.TargetEnvironment, overrideRequest.PipelineId, chartVersion, 
overrideRequest.DeploymentWithConfig, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - span.End() - if err != nil { - impl.logger.Errorw("error in fetching config map n secret ", "err", err) - configMapJson = nil - } - _, span = otel.Tracer("orchestrator").Start(ctx, "appCrudOperationService.GetLabelsByAppIdForDeployment") - appLabelJsonByte, err := impl.appCrudOperationService.GetLabelsByAppIdForDeployment(overrideRequest.AppId) - span.End() - if err != nil { - impl.logger.Errorw("error in fetching app labels for gitOps commit", "err", err) - appLabelJsonByte = nil - } - _, span = otel.Tracer("orchestrator").Start(ctx, "mergeAndSave") - pipelineOverride, err := impl.savePipelineOverride(overrideRequest, envOverride.Id, triggeredAt) - if err != nil { - return valuesOverrideResponse, err - } - //TODO: check status and apply lock - releaseOverrideJson, err := impl.getReleaseOverride(envOverride, overrideRequest, artifact, pipelineOverride, strategy, &appMetrics) - if err != nil { - return valuesOverrideResponse, err - } - mergedValues, err := impl.mergeOverrideValues(envOverride, dbMigrationOverride, releaseOverrideJson, configMapJson, appLabelJsonByte, strategy) - - appName := fmt.Sprintf("%s-%s", overrideRequest.AppName, envOverride.Environment.Name) - mergedValues = impl.autoscalingCheckBeforeTrigger(ctx, appName, envOverride.Namespace, mergedValues, overrideRequest) - - _, span = otel.Tracer("orchestrator").Start(ctx, "dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment") - // handle image pull secret if access given - mergedValues, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(envOverride.Environment, pipeline.CiPipelineId, mergedValues) - span.End() - if err != nil { - return valuesOverrideResponse, err - } - pipelineOverride.PipelineMergedValues = string(mergedValues) - err = impl.pipelineOverrideRepository.Update(pipelineOverride) - if err != nil { - return valuesOverrideResponse, err - 
} - //valuesOverrideResponse. - valuesOverrideResponse.MergedValues = string(mergedValues) - valuesOverrideResponse.EnvOverride = envOverride - valuesOverrideResponse.PipelineOverride = pipelineOverride - valuesOverrideResponse.AppMetrics = appMetrics - valuesOverrideResponse.PipelineStrategy = strategy - valuesOverrideResponse.ReleaseOverrideJSON = releaseOverrideJson - valuesOverrideResponse.Artifact = artifact - valuesOverrideResponse.Pipeline = pipeline - return valuesOverrideResponse, err -} - -func (impl *AppServiceImpl) BuildManifestForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (valuesOverrideResponse *ValuesOverrideResponse, builtChartPath string, err error) { - - valuesOverrideResponse = &ValuesOverrideResponse{} - valuesOverrideResponse, err = impl.GetValuesOverrideForTrigger(overrideRequest, triggeredAt, ctx) - if err != nil { - impl.logger.Errorw("error in fetching values for trigger", "err", err) - return valuesOverrideResponse, "", err - } - builtChartPath, err = impl.BuildChartAndGetPath(overrideRequest.AppName, valuesOverrideResponse.EnvOverride, ctx) - if err != nil { - impl.logger.Errorw("error in parsing reference chart", "err", err) - return valuesOverrideResponse, "", err - } - return valuesOverrideResponse, builtChartPath, err -} - func (impl *AppServiceImpl) GetDeployedManifestByPipelineIdAndCDWorkflowId(appId int, envId int, cdWorkflowId int, ctx context.Context) ([]byte, error) { manifestByteArray := make([]byte, 0) @@ -1713,242 +1056,27 @@ func (impl *AppServiceImpl) CreateGitopsRepo(app *app.App, userId int32) (gitops return gitOpsRepoName, chartGitAttr, nil } -func (impl *AppServiceImpl) DeployArgocdApp(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *ValuesOverrideResponse, ctx context.Context) error { - - impl.logger.Debugw("new pipeline found", "pipeline", valuesOverrideResponse.Pipeline) - _, span := otel.Tracer("orchestrator").Start(ctx, 
"createArgoApplicationIfRequired") - name, err := impl.createArgoApplicationIfRequired(overrideRequest.AppId, valuesOverrideResponse.EnvOverride, valuesOverrideResponse.Pipeline, overrideRequest.UserId) +func (impl *AppServiceImpl) saveTimeline(overrideRequest *bean.ValuesOverrideRequest, status string, statusDetail string, ctx context.Context) { + // creating cd pipeline status timeline for git commit + timeline := &pipelineConfig.PipelineStatusTimeline{ + CdWorkflowRunnerId: overrideRequest.WfrId, + Status: status, + StatusDetail: statusDetail, + StatusTime: time.Now(), + AuditLog: sql.AuditLog{ + CreatedBy: overrideRequest.UserId, + CreatedOn: time.Now(), + UpdatedBy: overrideRequest.UserId, + UpdatedOn: time.Now(), + }, + } + _, span := otel.Tracer("orchestrator").Start(ctx, "cdPipelineStatusTimelineRepo.SaveTimeline") + timelineErr := impl.pipelineStatusTimelineService.SaveTimeline(timeline, nil, false) span.End() - if err != nil { - impl.logger.Errorw("acd application create error on cd trigger", "err", err, "req", overrideRequest) - return err + if timelineErr != nil { + impl.logger.Errorw("error in creating timeline status for git commit", "err", timelineErr, "timeline", timeline) } - impl.logger.Debugw("argocd application created", "name", name) - - _, span = otel.Tracer("orchestrator").Start(ctx, "updateArgoPipeline") - updateAppInArgocd, err := impl.updateArgoPipeline(overrideRequest.AppId, valuesOverrideResponse.Pipeline.Name, valuesOverrideResponse.EnvOverride, ctx) - span.End() - if err != nil { - impl.logger.Errorw("error in updating argocd app ", "err", err) - return err - } - if updateAppInArgocd { - impl.logger.Debug("argo-cd successfully updated") - } else { - impl.logger.Debug("argo-cd failed to update, ignoring it") - } - return nil -} - -func (impl *AppServiceImpl) DeployApp(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *ValuesOverrideResponse, triggeredAt time.Time, ctx context.Context) error { - - if 
IsAcdApp(overrideRequest.DeploymentAppType) { - _, span := otel.Tracer("orchestrator").Start(ctx, "DeployArgocdApp") - err := impl.DeployArgocdApp(overrideRequest, valuesOverrideResponse, ctx) - span.End() - if err != nil { - impl.logger.Errorw("error in deploying app on argocd", "err", err) - return err - } - } else if IsHelmApp(overrideRequest.DeploymentAppType) { - _, span := otel.Tracer("orchestrator").Start(ctx, "createHelmAppForCdPipeline") - _, err := impl.createHelmAppForCdPipeline(overrideRequest, valuesOverrideResponse, triggeredAt, ctx) - span.End() - if err != nil { - impl.logger.Errorw("error in creating or updating helm application for cd pipeline", "err", err) - return err - } - } - return nil -} - -func (impl *AppServiceImpl) ValidateTriggerEvent(triggerEvent bean.TriggerEvent) (bool, error) { - - switch triggerEvent.DeploymentAppType { - case bean2.ArgoCd: - if !triggerEvent.PerformChartPush { - return false, errors2.New("For deployment type ArgoCd, PerformChartPush flag expected value = true, got false") - } - case bean2.Helm: - return true, nil - case bean2.GitOpsWithoutDeployment: - if triggerEvent.PerformDeploymentOnCluster { - return false, errors2.New("For deployment type GitOpsWithoutDeployment, PerformDeploymentOnCluster flag expected value = false, got value = true") - } - case bean2.ManifestDownload: - if triggerEvent.PerformChartPush { - return false, error2.New("For deployment type ManifestDownload, PerformChartPush flag expected value = false, got true") - } - if triggerEvent.PerformDeploymentOnCluster { - return false, error2.New("For deployment type ManifestDownload, PerformDeploymentOnCluster flag expected value = false, got true") - } - } - return true, nil - -} - -// write integration/unit test for each function -func (impl *AppServiceImpl) TriggerPipeline(overrideRequest *bean.ValuesOverrideRequest, triggerEvent bean.TriggerEvent, ctx context.Context) (releaseNo int, manifest []byte, err error) { - - isRequestValid, err := 
impl.ValidateTriggerEvent(triggerEvent) - if !isRequestValid { - return releaseNo, manifest, err - } - - valuesOverrideResponse, builtChartPath, err := impl.BuildManifestForTrigger(overrideRequest, triggerEvent.TriggerdAt, ctx) - if err != nil { - return releaseNo, manifest, err - } - - _, span := otel.Tracer("orchestrator").Start(ctx, "CreateHistoriesForDeploymentTrigger") - err = impl.CreateHistoriesForDeploymentTrigger(valuesOverrideResponse.Pipeline, valuesOverrideResponse.PipelineStrategy, valuesOverrideResponse.EnvOverride, triggerEvent.TriggerdAt, triggerEvent.TriggeredBy) - span.End() - - if triggerEvent.PerformChartPush { - manifestPushTemplate, err := impl.BuildManifestPushTemplate(overrideRequest, valuesOverrideResponse, builtChartPath, &manifest) - if err != nil { - impl.logger.Errorw("error in building manifest push template", "err", err) - return releaseNo, manifest, err - } - manifestPushService := impl.GetManifestPushService(triggerEvent) - manifestPushResponse := manifestPushService.PushChart(manifestPushTemplate, ctx) - if manifestPushResponse.Error != nil { - impl.logger.Errorw("Error in pushing manifest to git", "err", err, "git_repo_url", manifestPushTemplate.RepoUrl) - return releaseNo, manifest, err - } - pipelineOverrideUpdateRequest := &chartConfig.PipelineOverride{ - Id: valuesOverrideResponse.PipelineOverride.Id, - GitHash: manifestPushResponse.CommitHash, - CommitTime: manifestPushResponse.CommitTime, - EnvConfigOverrideId: valuesOverrideResponse.EnvOverride.Id, - PipelineOverrideValues: valuesOverrideResponse.ReleaseOverrideJSON, - PipelineId: overrideRequest.PipelineId, - CiArtifactId: overrideRequest.CiArtifactId, - PipelineMergedValues: valuesOverrideResponse.MergedValues, - AuditLog: sql.AuditLog{UpdatedOn: triggerEvent.TriggerdAt, UpdatedBy: overrideRequest.UserId}, - } - _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineOverrideRepository.Update") - err = 
impl.pipelineOverrideRepository.Update(pipelineOverrideUpdateRequest) - span.End() - } - - if triggerEvent.PerformDeploymentOnCluster { - err = impl.DeployApp(overrideRequest, valuesOverrideResponse, triggerEvent.TriggerdAt, ctx) - if err != nil { - impl.logger.Errorw("error in deploying app", "err", err) - return releaseNo, manifest, err - } - } - - go impl.WriteCDTriggerEvent(overrideRequest, valuesOverrideResponse.Artifact, valuesOverrideResponse.PipelineOverride.PipelineReleaseCounter, valuesOverrideResponse.PipelineOverride.Id) - - _, spann := otel.Tracer("orchestrator").Start(ctx, "MarkImageScanDeployed") - _ = impl.MarkImageScanDeployed(overrideRequest.AppId, valuesOverrideResponse.EnvOverride.TargetEnvironment, valuesOverrideResponse.Artifact.ImageDigest, overrideRequest.ClusterId, valuesOverrideResponse.Artifact.ScanEnabled) - spann.End() - - middleware.CdTriggerCounter.WithLabelValues(overrideRequest.AppName, overrideRequest.EnvName).Inc() - - return valuesOverrideResponse.PipelineOverride.PipelineReleaseCounter, manifest, nil - -} - -func (impl *AppServiceImpl) GetTriggerEvent(deploymentAppType string, triggeredAt time.Time, deployedBy int32) bean.TriggerEvent { - // trigger event will decide whether to perform GitOps or deployment for a particular deployment app type - triggerEvent := bean.TriggerEvent{ - TriggeredBy: deployedBy, - TriggerdAt: triggeredAt, - } - switch deploymentAppType { - case bean2.ArgoCd: - triggerEvent.PerformChartPush = true - triggerEvent.PerformDeploymentOnCluster = true - triggerEvent.GetManifestInResponse = false - triggerEvent.DeploymentAppType = bean2.ArgoCd - triggerEvent.ManifestStorageType = bean2.ManifestStorageGit - case bean2.Helm: - triggerEvent.PerformChartPush = false - triggerEvent.PerformDeploymentOnCluster = true - triggerEvent.GetManifestInResponse = false - triggerEvent.DeploymentAppType = bean2.Helm - } - return triggerEvent -} - -func (impl *AppServiceImpl) TriggerRelease(overrideRequest 
*bean.ValuesOverrideRequest, ctx context.Context, triggeredAt time.Time, deployedBy int32) (releaseNo int, manifest []byte, err error) { - triggerEvent := impl.GetTriggerEvent(overrideRequest.DeploymentAppType, triggeredAt, deployedBy) - releaseNo, manifest, err = impl.TriggerPipeline(overrideRequest, triggerEvent, ctx) - if err != nil { - return 0, manifest, err - } - return releaseNo, manifest, nil -} - -func (impl *AppServiceImpl) GetManifestPushService(triggerEvent bean.TriggerEvent) ManifestPushService { - var manifestPushService ManifestPushService - if triggerEvent.ManifestStorageType == bean2.ManifestStorageGit { - manifestPushService = impl.GitOpsManifestPushService - } - return manifestPushService -} - -func (impl *AppServiceImpl) BuildManifestPushTemplate(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *ValuesOverrideResponse, builtChartPath string, manifest *[]byte) (*bean3.ManifestPushTemplate, error) { - - manifestPushTemplate := &bean3.ManifestPushTemplate{ - WorkflowRunnerId: overrideRequest.WfrId, - AppId: overrideRequest.AppId, - ChartRefId: valuesOverrideResponse.EnvOverride.Chart.ChartRefId, - EnvironmentId: valuesOverrideResponse.EnvOverride.Environment.Id, - UserId: overrideRequest.UserId, - PipelineOverrideId: valuesOverrideResponse.PipelineOverride.Id, - AppName: overrideRequest.AppName, - TargetEnvironmentName: valuesOverrideResponse.EnvOverride.TargetEnvironment, - BuiltChartPath: builtChartPath, - BuiltChartBytes: manifest, - MergedValues: valuesOverrideResponse.MergedValues, - } - - manifestPushConfig, err := impl.manifestPushConfigRepository.GetManifestPushConfigByAppIdAndEnvId(overrideRequest.AppId, overrideRequest.EnvId) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in fetching manifest push config from db", "err", err) - return manifestPushTemplate, err - } - - if manifestPushConfig != nil { - if manifestPushConfig.StorageType == bean2.ManifestStorageGit { - // need to implement for git repo 
push - // currently manifest push config doesn't have git push config. Gitops config is derived from charts, chart_env_config_override and chart_ref table - } - } else { - manifestPushTemplate.ChartReferenceTemplate = valuesOverrideResponse.EnvOverride.Chart.ReferenceTemplate - manifestPushTemplate.ChartName = valuesOverrideResponse.EnvOverride.Chart.ChartName - manifestPushTemplate.ChartVersion = valuesOverrideResponse.EnvOverride.Chart.ChartVersion - manifestPushTemplate.ChartLocation = valuesOverrideResponse.EnvOverride.Chart.ChartLocation - manifestPushTemplate.RepoUrl = valuesOverrideResponse.EnvOverride.Chart.GitRepoUrl - } - return manifestPushTemplate, err -} - -func (impl *AppServiceImpl) saveTimeline(overrideRequest *bean.ValuesOverrideRequest, status string, statusDetail string, ctx context.Context) { - // creating cd pipeline status timeline for git commit - timeline := &pipelineConfig.PipelineStatusTimeline{ - CdWorkflowRunnerId: overrideRequest.WfrId, - Status: status, - StatusDetail: statusDetail, - StatusTime: time.Now(), - AuditLog: sql.AuditLog{ - CreatedBy: overrideRequest.UserId, - CreatedOn: time.Now(), - UpdatedBy: overrideRequest.UserId, - UpdatedOn: time.Now(), - }, - } - _, span := otel.Tracer("orchestrator").Start(ctx, "cdPipelineStatusTimelineRepo.SaveTimeline") - timelineErr := impl.pipelineStatusTimelineService.SaveTimeline(timeline, nil, false) - span.End() - if timelineErr != nil { - impl.logger.Errorw("error in creating timeline status for git commit", "err", timelineErr, "timeline", timeline) - } -} +} func (impl *AppServiceImpl) autoHealChartLocationInChart(ctx context.Context, envOverride *chartConfig.EnvConfigOverride) error { chartId := envOverride.Chart.Id @@ -1993,59 +1121,6 @@ func (impl *AppServiceImpl) autoHealChartLocationInChart(ctx context.Context, en return nil } -func (impl *AppServiceImpl) MarkImageScanDeployed(appId int, envId int, imageDigest string, clusterId int, isScanEnabled bool) error { - 
impl.logger.Debugw("mark image scan deployed for normal app, from cd auto or manual trigger", "imageDigest", imageDigest) - executionHistory, err := impl.imageScanHistoryRepository.FindByImageDigest(imageDigest) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in fetching execution history", "err", err) - return err - } - if executionHistory == nil || executionHistory.Id == 0 { - impl.logger.Errorw("no execution history found for digest", "digest", imageDigest) - return fmt.Errorf("no execution history found for digest - %s", imageDigest) - } - impl.logger.Debugw("mark image scan deployed for normal app, from cd auto or manual trigger", "executionHistory", executionHistory) - var ids []int - ids = append(ids, executionHistory.Id) - - ot, err := impl.imageScanDeployInfoRepository.FindByTypeMetaAndTypeId(appId, security.ScanObjectType_APP) //todo insure this touple unique in db - if err != nil && err != pg.ErrNoRows { - return err - } else if err == pg.ErrNoRows && isScanEnabled { - imageScanDeployInfo := &security.ImageScanDeployInfo{ - ImageScanExecutionHistoryId: ids, - ScanObjectMetaId: appId, - ObjectType: security.ScanObjectType_APP, - EnvId: envId, - ClusterId: clusterId, - AuditLog: sql.AuditLog{ - CreatedOn: time.Now(), - CreatedBy: 1, - UpdatedOn: time.Now(), - UpdatedBy: 1, - }, - } - impl.logger.Debugw("mark image scan deployed for normal app, from cd auto or manual trigger", "imageScanDeployInfo", imageScanDeployInfo) - err = impl.imageScanDeployInfoRepository.Save(imageScanDeployInfo) - if err != nil { - impl.logger.Errorw("error in creating deploy info", "err", err) - } - } else { - // Updating Execution history for Latest Deployment to fetch out security Vulnerabilities for latest deployed info - if isScanEnabled { - ot.ImageScanExecutionHistoryId = ids - } else { - arr := []int{-1} - ot.ImageScanExecutionHistoryId = arr - } - err = impl.imageScanDeployInfoRepository.Update(ot) - if err != nil { - impl.logger.Errorw("error in 
updating deploy info for latest deployed image", "err", err) - } - } - return err -} - // FIXME tmp workaround func (impl *AppServiceImpl) GetCmSecretNew(appId int, envId int, isJob bool) (*bean.ConfigMapJson, *bean.ConfigSecretJson, error) { var configMapJson string @@ -2170,99 +1245,6 @@ func (impl *AppServiceImpl) GetConfigMapAndSecretJson(appId int, envId int, pipe return merged, nil } -func (impl *AppServiceImpl) getConfigMapAndSecretJsonV2(appId int, envId int, pipelineId int, chartVersion string, deploymentWithConfig bean.DeploymentConfigurationType, wfrIdForDeploymentWithSpecificTrigger int) ([]byte, error) { - - var configMapJson string - var secretDataJson string - var configMapJsonApp string - var secretDataJsonApp string - var configMapJsonEnv string - var secretDataJsonEnv string - var err error - //var configMapJsonPipeline string - //var secretDataJsonPipeline string - - merged := []byte("{}") - if deploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { - configMapA, err := impl.configMapRepository.GetByAppIdAppLevel(appId) - if err != nil && pg.ErrNoRows != err { - return []byte("{}"), err - } - if configMapA != nil && configMapA.Id > 0 { - configMapJsonApp = configMapA.ConfigMapData - secretDataJsonApp = configMapA.SecretData - } - configMapE, err := impl.configMapRepository.GetByAppIdAndEnvIdEnvLevel(appId, envId) - if err != nil && pg.ErrNoRows != err { - return []byte("{}"), err - } - if configMapE != nil && configMapE.Id > 0 { - configMapJsonEnv = configMapE.ConfigMapData - secretDataJsonEnv = configMapE.SecretData - } - } else if deploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { - //fetching history and setting envLevelConfig and not appLevelConfig because history already contains merged appLevel and envLevel configs - configMapHistory, err := impl.configMapHistoryRepository.GetHistoryByPipelineIdAndWfrId(pipelineId, wfrIdForDeploymentWithSpecificTrigger, repository3.CONFIGMAP_TYPE) - if err != nil { - 
impl.logger.Errorw("error in getting config map history config by pipelineId and wfrId ", "err", err, "pipelineId", pipelineId, "wfrid", wfrIdForDeploymentWithSpecificTrigger) - return []byte("{}"), err - } - configMapJsonEnv = configMapHistory.Data - secretHistory, err := impl.configMapHistoryRepository.GetHistoryByPipelineIdAndWfrId(pipelineId, wfrIdForDeploymentWithSpecificTrigger, repository3.SECRET_TYPE) - if err != nil { - impl.logger.Errorw("error in getting config map history config by pipelineId and wfrId ", "err", err, "pipelineId", pipelineId, "wfrid", wfrIdForDeploymentWithSpecificTrigger) - return []byte("{}"), err - } - secretDataJsonEnv = secretHistory.Data - } - configMapJson, err = impl.mergeUtil.ConfigMapMerge(configMapJsonApp, configMapJsonEnv) - if err != nil { - return []byte("{}"), err - } - chartMajorVersion, chartMinorVersion, err := util2.ExtractChartVersion(chartVersion) - if err != nil { - impl.logger.Errorw("chart version parsing", "err", err) - return []byte("{}"), err - } - secretDataJson, err = impl.mergeUtil.ConfigSecretMerge(secretDataJsonApp, secretDataJsonEnv, chartMajorVersion, chartMinorVersion, false) - if err != nil { - return []byte("{}"), err - } - configResponseR := bean.ConfigMapRootJson{} - configResponse := bean.ConfigMapJson{} - if configMapJson != "" { - err = json.Unmarshal([]byte(configMapJson), &configResponse) - if err != nil { - return []byte("{}"), err - } - } - configResponseR.ConfigMapJson = configResponse - secretResponseR := bean.ConfigSecretRootJson{} - secretResponse := bean.ConfigSecretJson{} - if configMapJson != "" { - err = json.Unmarshal([]byte(secretDataJson), &secretResponse) - if err != nil { - return []byte("{}"), err - } - } - secretResponseR.ConfigSecretJson = secretResponse - - configMapByte, err := json.Marshal(configResponseR) - if err != nil { - return []byte("{}"), err - } - secretDataByte, err := json.Marshal(secretResponseR) - if err != nil { - return []byte("{}"), err - } - - merged, err 
= impl.mergeUtil.JsonPatch(configMapByte, secretDataByte) - if err != nil { - return []byte("{}"), err - } - return merged, nil -} - func (impl *AppServiceImpl) synchCD(pipeline *pipelineConfig.Pipeline, ctx context.Context, overrideRequest *bean.ValuesOverrideRequest, envOverride *chartConfig.EnvConfigOverride) { req := new(application2.ApplicationSyncRequest) @@ -2279,44 +1261,6 @@ func (impl *AppServiceImpl) synchCD(pipeline *pipelineConfig.Pipeline, ctx conte } } -func (impl *AppServiceImpl) WriteCDTriggerEvent(overrideRequest *bean.ValuesOverrideRequest, artifact *repository.CiArtifact, releaseId, pipelineOverrideId int) { - - event := impl.eventFactory.Build(util.Trigger, &overrideRequest.PipelineId, overrideRequest.AppId, &overrideRequest.EnvId, util.CD) - impl.logger.Debugw("event WriteCDTriggerEvent", "event", event) - event = impl.eventFactory.BuildExtraCDData(event, nil, pipelineOverrideId, bean.CD_WORKFLOW_TYPE_DEPLOY) - _, evtErr := impl.eventClient.WriteNotificationEvent(event) - if evtErr != nil { - impl.logger.Errorw("CD trigger event not sent", "error", evtErr) - } - deploymentEvent := DeploymentEvent{ - ApplicationId: overrideRequest.AppId, - EnvironmentId: overrideRequest.EnvId, //check for production Environment - ReleaseId: releaseId, - PipelineOverrideId: pipelineOverrideId, - TriggerTime: time.Now(), - CiArtifactId: overrideRequest.CiArtifactId, - } - ciPipelineMaterials, err := impl.ciPipelineMaterialRepository.GetByPipelineId(artifact.PipelineId) - if err != nil { - impl.logger.Errorw("error in ") - } - materialInfoMap, mErr := artifact.ParseMaterialInfo() - if mErr != nil { - impl.logger.Errorw("material info map error", mErr) - return - } - for _, ciPipelineMaterial := range ciPipelineMaterials { - hash := materialInfoMap[ciPipelineMaterial.GitMaterial.Url] - pipelineMaterialInfo := &PipelineMaterialInfo{PipelineMaterialId: ciPipelineMaterial.Id, CommitHash: hash} - deploymentEvent.PipelineMaterials = 
append(deploymentEvent.PipelineMaterials, pipelineMaterialInfo) - } - impl.logger.Infow("triggering deployment event", "event", deploymentEvent) - err = impl.eventClient.WriteNatsEvent(pubsub.CD_SUCCESS, deploymentEvent) - if err != nil { - impl.logger.Errorw("error in writing cd trigger event", "err", err) - } -} - type DeploymentEvent struct { ApplicationId int EnvironmentId int @@ -2365,358 +1309,6 @@ type ReleaseAttributes struct { AppMetrics *bool } -func (impl *AppServiceImpl) getReleaseOverride(envOverride *chartConfig.EnvConfigOverride, overrideRequest *bean.ValuesOverrideRequest, artifact *repository.CiArtifact, pipelineOverride *chartConfig.PipelineOverride, strategy *chartConfig.PipelineStrategy, appMetrics *bool) (releaseOverride string, err error) { - - artifactImage := artifact.Image - imageTag := strings.Split(artifactImage, ":") - - imageTagLen := len(imageTag) - - imageName := "" - - for i := 0; i < imageTagLen-1; i++ { - if i != imageTagLen-2 { - imageName = imageName + imageTag[i] + ":" - } else { - imageName = imageName + imageTag[i] - } - } - - appId := strconv.Itoa(overrideRequest.AppId) - envId := strconv.Itoa(overrideRequest.EnvId) - - deploymentStrategy := "" - if strategy != nil { - deploymentStrategy = string(strategy.Strategy) - } - releaseAttribute := ReleaseAttributes{ - Name: imageName, - Tag: imageTag[imageTagLen-1], - PipelineName: overrideRequest.PipelineName, - ReleaseVersion: strconv.Itoa(pipelineOverride.PipelineReleaseCounter), - DeploymentType: deploymentStrategy, - App: appId, - Env: envId, - AppMetrics: appMetrics, - } - override, err := util2.Tprintf(envOverride.Chart.ImageDescriptorTemplate, releaseAttribute) - if err != nil { - return "", &ApiError{InternalMessage: "unable to render ImageDescriptorTemplate"} - } - if overrideRequest.AdditionalOverride != nil { - userOverride, err := overrideRequest.AdditionalOverride.MarshalJSON() - if err != nil { - return "", err - } - data, err := impl.mergeUtil.JsonPatch(userOverride, 
[]byte(override)) - if err != nil { - return "", err - } - override = string(data) - } - return override, nil -} - -func (impl *AppServiceImpl) mergeOverrideValues(envOverride *chartConfig.EnvConfigOverride, - dbMigrationOverride []byte, - releaseOverrideJson string, - configMapJson []byte, - appLabelJsonByte []byte, - strategy *chartConfig.PipelineStrategy, -) (mergedValues []byte, err error) { - - //merge three values on the fly - //ordering is important here - //global < environment < db< release - var merged []byte - if !envOverride.IsOverride { - merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.Chart.ResolvedGlobalOverride)) - if err != nil { - return nil, err - } - } else { - merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.ResolvedEnvOverrideValues)) - if err != nil { - return nil, err - } - } - if strategy != nil && len(strategy.Config) > 0 { - merged, err = impl.mergeUtil.JsonPatch(merged, []byte(strategy.Config)) - if err != nil { - return nil, err - } - } - merged, err = impl.mergeUtil.JsonPatch(merged, dbMigrationOverride) - if err != nil { - return nil, err - } - merged, err = impl.mergeUtil.JsonPatch(merged, []byte(releaseOverrideJson)) - if err != nil { - return nil, err - } - if configMapJson != nil { - merged, err = impl.mergeUtil.JsonPatch(merged, configMapJson) - if err != nil { - return nil, err - } - } - if appLabelJsonByte != nil { - merged, err = impl.mergeUtil.JsonPatch(merged, appLabelJsonByte) - if err != nil { - return nil, err - } - } - return merged, nil -} - -func (impl *AppServiceImpl) mergeAndSave(envOverride *chartConfig.EnvConfigOverride, - overrideRequest *bean.ValuesOverrideRequest, - dbMigrationOverride []byte, - artifact *repository.CiArtifact, - pipeline *pipelineConfig.Pipeline, configMapJson, appLabelJsonByte []byte, strategy *chartConfig.PipelineStrategy, ctx context.Context, - triggeredAt time.Time, deployedBy int32, appMetrics *bool) (releaseId int, overrideId int, 
mergedValues string, err error) { - - //register release , obtain release id TODO: populate releaseId to template - override, err := impl.savePipelineOverride(overrideRequest, envOverride.Id, triggeredAt) - if err != nil { - return 0, 0, "", err - } - //TODO: check status and apply lock - overrideJson, err := impl.getReleaseOverride(envOverride, overrideRequest, artifact, override, strategy, appMetrics) - if err != nil { - return 0, 0, "", err - } - - //merge three values on the fly - //ordering is important here - //global < environment < db< release - var merged []byte - if !envOverride.IsOverride { - merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.Chart.GlobalOverride)) - if err != nil { - return 0, 0, "", err - } - } else { - merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.EnvOverrideValues)) - if err != nil { - return 0, 0, "", err - } - } - - //pipeline override here comes from pipeline strategy table - if strategy != nil && len(strategy.Config) > 0 { - merged, err = impl.mergeUtil.JsonPatch(merged, []byte(strategy.Config)) - if err != nil { - return 0, 0, "", err - } - } - merged, err = impl.mergeUtil.JsonPatch(merged, dbMigrationOverride) - if err != nil { - return 0, 0, "", err - } - merged, err = impl.mergeUtil.JsonPatch(merged, []byte(overrideJson)) - if err != nil { - return 0, 0, "", err - } - - if configMapJson != nil { - merged, err = impl.mergeUtil.JsonPatch(merged, configMapJson) - if err != nil { - return 0, 0, "", err - } - } - - if appLabelJsonByte != nil { - merged, err = impl.mergeUtil.JsonPatch(merged, appLabelJsonByte) - if err != nil { - return 0, 0, "", err - } - } - - appName := fmt.Sprintf("%s-%s", pipeline.App.AppName, envOverride.Environment.Name) - merged = impl.autoscalingCheckBeforeTrigger(ctx, appName, envOverride.Namespace, merged, overrideRequest) - - _, span := otel.Tracer("orchestrator").Start(ctx, "dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment") - // 
handle image pull secret if access given - merged, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(envOverride.Environment, pipeline.CiPipelineId, merged) - span.End() - if err != nil { - return 0, 0, "", err - } - - commitHash := "" - commitTime := time.Time{} - if IsAcdApp(pipeline.DeploymentAppType) { - chartRepoName := impl.chartTemplateService.GetGitOpsRepoNameFromUrl(envOverride.Chart.GitRepoUrl) - _, span = otel.Tracer("orchestrator").Start(ctx, "chartTemplateService.GetUserEmailIdAndNameForGitOpsCommit") - //getting username & emailId for commit author data - userEmailId, userName := impl.chartTemplateService.GetUserEmailIdAndNameForGitOpsCommit(overrideRequest.UserId) - span.End() - chartGitAttr := &ChartConfig{ - FileName: fmt.Sprintf("_%d-values.yaml", envOverride.TargetEnvironment), - FileContent: string(merged), - ChartName: envOverride.Chart.ChartName, - ChartLocation: envOverride.Chart.ChartLocation, - ChartRepoName: chartRepoName, - ReleaseMessage: fmt.Sprintf("release-%d-env-%d ", override.Id, envOverride.TargetEnvironment), - UserName: userName, - UserEmailId: userEmailId, - } - gitOpsConfigBitbucket, err := impl.gitOpsConfigRepository.GetGitOpsConfigByProvider(BITBUCKET_PROVIDER) - if err != nil { - if err == pg.ErrNoRows { - gitOpsConfigBitbucket.BitBucketWorkspaceId = "" - } else { - return 0, 0, "", err - } - } - gitOpsConfig := &bean.GitOpsConfigDto{BitBucketWorkspaceId: gitOpsConfigBitbucket.BitBucketWorkspaceId} - _, span = otel.Tracer("orchestrator").Start(ctx, "gitFactory.Client.CommitValues") - commitHash, commitTime, err = impl.gitFactory.Client.CommitValues(chartGitAttr, gitOpsConfig) - span.End() - if err != nil { - impl.logger.Errorw("error in git commit", "err", err) - return 0, 0, "", err - } - } - if commitTime.IsZero() { - commitTime = time.Now() - } - pipelineOverride := &chartConfig.PipelineOverride{ - Id: override.Id, - GitHash: commitHash, - CommitTime: commitTime, - EnvConfigOverrideId: 
envOverride.Id, - PipelineOverrideValues: overrideJson, - PipelineId: overrideRequest.PipelineId, - CiArtifactId: overrideRequest.CiArtifactId, - PipelineMergedValues: string(merged), - AuditLog: sql.AuditLog{UpdatedOn: triggeredAt, UpdatedBy: deployedBy}, - } - _, span = otel.Tracer("orchestrator").Start(ctx, "pipelineOverrideRepository.Update") - err = impl.pipelineOverrideRepository.Update(pipelineOverride) - span.End() - if err != nil { - return 0, 0, "", err - } - mergedValues = string(merged) - return override.PipelineReleaseCounter, override.Id, mergedValues, nil -} - -func (impl *AppServiceImpl) savePipelineOverride(overrideRequest *bean.ValuesOverrideRequest, envOverrideId int, triggeredAt time.Time) (override *chartConfig.PipelineOverride, err error) { - currentReleaseNo, err := impl.pipelineOverrideRepository.GetCurrentPipelineReleaseCounter(overrideRequest.PipelineId) - if err != nil { - return nil, err - } - po := &chartConfig.PipelineOverride{ - EnvConfigOverrideId: envOverrideId, - Status: models.CHARTSTATUS_NEW, - PipelineId: overrideRequest.PipelineId, - CiArtifactId: overrideRequest.CiArtifactId, - PipelineReleaseCounter: currentReleaseNo + 1, - CdWorkflowId: overrideRequest.CdWorkflowId, - AuditLog: sql.AuditLog{CreatedBy: overrideRequest.UserId, CreatedOn: triggeredAt, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, - DeploymentType: overrideRequest.DeploymentType, - } - - err = impl.pipelineOverrideRepository.Save(po) - if err != nil { - return nil, err - } - err = impl.checkAndFixDuplicateReleaseNo(po) - if err != nil { - impl.logger.Errorw("error in checking release no duplicacy", "pipeline", po, "err", err) - return nil, err - } - return po, nil -} - -func (impl *AppServiceImpl) checkAndFixDuplicateReleaseNo(override *chartConfig.PipelineOverride) error { - - uniqueVerified := false - retryCount := 0 - - for !uniqueVerified && retryCount < 5 { - retryCount = retryCount + 1 - overrides, err := 
impl.pipelineOverrideRepository.GetByPipelineIdAndReleaseNo(override.PipelineId, override.PipelineReleaseCounter) - if err != nil { - return err - } - if overrides[0].Id == override.Id { - uniqueVerified = true - } else { - //duplicate might be due to concurrency, lets fix it - currentReleaseNo, err := impl.pipelineOverrideRepository.GetCurrentPipelineReleaseCounter(override.PipelineId) - if err != nil { - return err - } - override.PipelineReleaseCounter = currentReleaseNo + 1 - err = impl.pipelineOverrideRepository.Save(override) - if err != nil { - return err - } - } - } - if !uniqueVerified { - return fmt.Errorf("duplicate verification retry count exide max overrideId: %d ,count: %d", override.Id, retryCount) - } - return nil -} - -func (impl *AppServiceImpl) updateArgoPipeline(appId int, pipelineName string, envOverride *chartConfig.EnvConfigOverride, ctx context.Context) (bool, error) { - //repo has been registered while helm create - if ctx == nil { - impl.logger.Errorw("err in syncing ACD, ctx is NULL", "pipelineName", pipelineName) - return false, nil - } - app, err := impl.appRepository.FindById(appId) - if err != nil { - impl.logger.Errorw("no app found ", "err", err) - return false, err - } - envModel, err := impl.envRepository.FindById(envOverride.TargetEnvironment) - if err != nil { - return false, err - } - argoAppName := fmt.Sprintf("%s-%s", app.AppName, envModel.Name) - impl.logger.Infow("received payload, updateArgoPipeline", "appId", appId, "pipelineName", pipelineName, "envId", envOverride.TargetEnvironment, "argoAppName", argoAppName, "context", ctx) - application, err := impl.acdClient.Get(ctx, &application2.ApplicationQuery{Name: &argoAppName}) - if err != nil { - impl.logger.Errorw("no argo app exists", "app", argoAppName, "pipeline", pipelineName) - return false, err - } - //if status, ok:=status.FromError(err);ok{ - appStatus, _ := status.FromError(err) - - if appStatus.Code() == codes.OK { - impl.logger.Debugw("argo app exists", "app", 
argoAppName, "pipeline", pipelineName) - if application.Spec.Source.Path != envOverride.Chart.ChartLocation || application.Spec.Source.TargetRevision != "master" { - patchReq := v1alpha1.Application{Spec: v1alpha1.ApplicationSpec{Source: v1alpha1.ApplicationSource{Path: envOverride.Chart.ChartLocation, RepoURL: envOverride.Chart.GitRepoUrl, TargetRevision: "master"}}} - reqbyte, err := json.Marshal(patchReq) - if err != nil { - impl.logger.Errorw("error in creating patch", "err", err) - } - reqString := string(reqbyte) - patchType := "merge" - _, err = impl.acdClient.Patch(ctx, &application2.ApplicationPatchRequest{Patch: &reqString, Name: &argoAppName, PatchType: &patchType}) - if err != nil { - impl.logger.Errorw("error in creating argo pipeline ", "name", pipelineName, "patch", string(reqbyte), "err", err) - return false, err - } - impl.logger.Debugw("pipeline update req ", "res", patchReq) - } else { - impl.logger.Debug("pipeline no need to update ") - } - return true, nil - } else if appStatus.Code() == codes.NotFound { - impl.logger.Errorw("argo app not found", "app", argoAppName, "pipeline", pipelineName) - return false, nil - } else { - impl.logger.Errorw("err in checking application on gocd", "err", err, "pipeline", pipelineName) - return false, err - } -} - func (impl *AppServiceImpl) UpdateInstalledAppVersionHistoryByACDObject(app *v1alpha1.Application, installedAppVersionHistoryId int, updateTimedOutStatus bool) error { installedAppVersionHistory, err := impl.installedAppVersionHistoryRepository.GetInstalledAppVersionHistory(installedAppVersionHistoryId) if err != nil { @@ -2784,443 +1376,6 @@ const nameOverride = "nameOverride" const enabled = "enabled" const replicaCount = "replicaCount" -func (impl *AppServiceImpl) getAutoScalingReplicaCount(templateMap map[string]interface{}, appName string) *util2.HpaResourceRequest { - hasOverride := false - if _, ok := templateMap[fullnameOverride]; ok { - appNameOverride := templateMap[fullnameOverride].(string) 
- if len(appNameOverride) > 0 { - appName = appNameOverride - hasOverride = true - } - } - if !hasOverride { - if _, ok := templateMap[nameOverride]; ok { - nameOverride := templateMap[nameOverride].(string) - if len(nameOverride) > 0 { - appName = fmt.Sprintf("%s-%s", appName, nameOverride) - } - } - } - hpaResourceRequest := &util2.HpaResourceRequest{} - hpaResourceRequest.Version = "" - hpaResourceRequest.Group = autoscaling.ServiceName - hpaResourceRequest.Kind = HorizontalPodAutoscaler - impl.logger.Infow("getAutoScalingReplicaCount", "hpaResourceRequest", hpaResourceRequest) - if _, ok := templateMap[kedaAutoscaling]; ok { - as := templateMap[kedaAutoscaling] - asd := as.(map[string]interface{}) - if _, ok := asd[enabled]; ok { - impl.logger.Infow("getAutoScalingReplicaCount", "hpaResourceRequest", hpaResourceRequest) - enable := asd[enabled].(bool) - if enable { - hpaResourceRequest.IsEnable = enable - hpaResourceRequest.ReqReplicaCount = templateMap[replicaCount].(float64) - hpaResourceRequest.ReqMaxReplicas = asd["maxReplicaCount"].(float64) - hpaResourceRequest.ReqMinReplicas = asd["minReplicaCount"].(float64) - hpaResourceRequest.ResourceName = fmt.Sprintf("%s-%s-%s", "keda-hpa", appName, "keda") - impl.logger.Infow("getAutoScalingReplicaCount", "hpaResourceRequest", hpaResourceRequest) - return hpaResourceRequest - } - } - } - - if _, ok := templateMap[autoscaling.ServiceName]; ok { - as := templateMap[autoscaling.ServiceName] - asd := as.(map[string]interface{}) - if _, ok := asd[enabled]; ok { - enable := asd[enabled].(bool) - if enable { - hpaResourceRequest.IsEnable = asd[enabled].(bool) - hpaResourceRequest.ReqReplicaCount = templateMap[replicaCount].(float64) - hpaResourceRequest.ReqMaxReplicas = asd["MaxReplicas"].(float64) - hpaResourceRequest.ReqMinReplicas = asd["MinReplicas"].(float64) - hpaResourceRequest.ResourceName = fmt.Sprintf("%s-%s", appName, "hpa") - return hpaResourceRequest - } - } - } - return hpaResourceRequest - -} - -func (impl 
*AppServiceImpl) autoscalingCheckBeforeTrigger(ctx context.Context, appName string, namespace string, merged []byte, overrideRequest *bean.ValuesOverrideRequest) []byte { - //pipeline := overrideRequest.Pipeline - var appId = overrideRequest.AppId - pipelineId := overrideRequest.PipelineId - var appDeploymentType = overrideRequest.DeploymentAppType - var clusterId = overrideRequest.ClusterId - deploymentType := overrideRequest.DeploymentType - templateMap := make(map[string]interface{}) - err := json.Unmarshal(merged, &templateMap) - if err != nil { - return merged - } - - hpaResourceRequest := impl.getAutoScalingReplicaCount(templateMap, appName) - impl.logger.Debugw("autoscalingCheckBeforeTrigger", "hpaResourceRequest", hpaResourceRequest) - if hpaResourceRequest.IsEnable { - resourceManifest := make(map[string]interface{}) - if IsAcdApp(appDeploymentType) { - query := &application2.ApplicationResourceRequest{ - Name: &appName, - Version: &hpaResourceRequest.Version, - Group: &hpaResourceRequest.Group, - Kind: &hpaResourceRequest.Kind, - ResourceName: &hpaResourceRequest.ResourceName, - Namespace: &namespace, - } - recv, err := impl.acdClient.GetResource(ctx, query) - impl.logger.Debugw("resource manifest get replica count", "response", recv) - if err != nil { - impl.logger.Errorw("ACD Get Resource API Failed", "err", err) - middleware.AcdGetResourceCounter.WithLabelValues(strconv.Itoa(appId), namespace, appName).Inc() - return merged - } - if recv != nil && len(*recv.Manifest) > 0 { - err := json.Unmarshal([]byte(*recv.Manifest), &resourceManifest) - if err != nil { - impl.logger.Errorw("unmarshal failed for hpa check", "err", err) - return merged - } - } - } else { - version := "v2beta2" - k8sResource, err := impl.K8sCommonService.GetResource(ctx, &k8s.ResourceRequestBean{ClusterId: clusterId, - K8sRequest: &k8s2.K8sRequestBean{ResourceIdentifier: k8s2.ResourceIdentifier{Name: hpaResourceRequest.ResourceName, - Namespace: namespace, GroupVersionKind: 
schema.GroupVersionKind{Group: hpaResourceRequest.Group, Kind: hpaResourceRequest.Kind, Version: version}}}}) - if err != nil { - impl.logger.Errorw("error occurred while fetching resource for app", "resourceName", hpaResourceRequest.ResourceName, "err", err) - return merged - } - resourceManifest = k8sResource.Manifest.Object - } - if len(resourceManifest) > 0 { - statusMap := resourceManifest["status"].(map[string]interface{}) - currentReplicaVal := statusMap["currentReplicas"] - currentReplicaCount, err := util2.ParseFloatNumber(currentReplicaVal) - if err != nil { - impl.logger.Errorw("error occurred while parsing replica count", "currentReplicas", currentReplicaVal, "err", err) - return merged - } - - reqReplicaCount := impl.fetchRequiredReplicaCount(currentReplicaCount, hpaResourceRequest.ReqMaxReplicas, hpaResourceRequest.ReqMinReplicas) - templateMap["replicaCount"] = reqReplicaCount - merged, err = json.Marshal(&templateMap) - if err != nil { - impl.logger.Errorw("marshaling failed for hpa check", "err", err) - return merged - } - } - } else { - impl.logger.Errorw("autoscaling is not enabled", "pipelineId", pipelineId) - } - - //check for custom chart support - if autoscalingEnabledPath, ok := templateMap[bean2.CustomAutoScalingEnabledPathKey]; ok { - if deploymentType == models.DEPLOYMENTTYPE_STOP { - merged, err = impl.setScalingValues(templateMap, bean2.CustomAutoScalingEnabledPathKey, merged, false) - if err != nil { - return merged - } - merged, err = impl.setScalingValues(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged, 0) - if err != nil { - return merged - } - } else { - autoscalingEnabled := false - autoscalingEnabledValue := gjson.Get(string(merged), autoscalingEnabledPath.(string)).Value() - if val, ok := autoscalingEnabledValue.(bool); ok { - autoscalingEnabled = val - } - if autoscalingEnabled { - // extract replica count, min, max and check for required value - replicaCount, err := 
impl.getReplicaCountFromCustomChart(templateMap, merged) - if err != nil { - return merged - } - merged, err = impl.setScalingValues(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged, replicaCount) - if err != nil { - return merged - } - } - } - } - - return merged -} - -func (impl *AppServiceImpl) getReplicaCountFromCustomChart(templateMap map[string]interface{}, merged []byte) (float64, error) { - autoscalingMinVal, err := impl.extractParamValue(templateMap, bean2.CustomAutoscalingMinPathKey, merged) - if err != nil { - return 0, err - } - autoscalingMaxVal, err := impl.extractParamValue(templateMap, bean2.CustomAutoscalingMaxPathKey, merged) - if err != nil { - return 0, err - } - autoscalingReplicaCountVal, err := impl.extractParamValue(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged) - if err != nil { - return 0, err - } - return impl.fetchRequiredReplicaCount(autoscalingReplicaCountVal, autoscalingMaxVal, autoscalingMinVal), nil -} - -func (impl *AppServiceImpl) extractParamValue(inputMap map[string]interface{}, key string, merged []byte) (float64, error) { - if _, ok := inputMap[key]; !ok { - return 0, errors.New("empty-val-err") - } - floatNumber, err := util2.ParseFloatNumber(gjson.Get(string(merged), inputMap[key].(string)).Value()) - if err != nil { - impl.logger.Errorw("error occurred while parsing float number", "key", key, "err", err) - } - return floatNumber, err -} - -func (impl *AppServiceImpl) setScalingValues(templateMap map[string]interface{}, customScalingKey string, merged []byte, value interface{}) ([]byte, error) { - autoscalingJsonPath := templateMap[customScalingKey] - autoscalingJsonPathKey := autoscalingJsonPath.(string) - mergedRes, err := sjson.Set(string(merged), autoscalingJsonPathKey, value) - if err != nil { - impl.logger.Errorw("error occurred while setting autoscaling key", "JsonPathKey", autoscalingJsonPathKey, "err", err) - return []byte{}, err - } - return []byte(mergedRes), nil -} - -func (impl 
*AppServiceImpl) fetchRequiredReplicaCount(currentReplicaCount float64, reqMaxReplicas float64, reqMinReplicas float64) float64 { - var reqReplicaCount float64 - if currentReplicaCount <= reqMaxReplicas && currentReplicaCount >= reqMinReplicas { - reqReplicaCount = currentReplicaCount - } else if currentReplicaCount > reqMaxReplicas { - reqReplicaCount = reqMaxReplicas - } else if currentReplicaCount < reqMinReplicas { - reqReplicaCount = reqMinReplicas - } - return reqReplicaCount -} - -func (impl *AppServiceImpl) CreateHistoriesForDeploymentTrigger(pipeline *pipelineConfig.Pipeline, strategy *chartConfig.PipelineStrategy, envOverride *chartConfig.EnvConfigOverride, deployedOn time.Time, deployedBy int32) error { - //creating history for deployment template - deploymentTemplateHistory, err := impl.deploymentTemplateHistoryService.CreateDeploymentTemplateHistoryForDeploymentTrigger(pipeline, envOverride, envOverride.Chart.ImageDescriptorTemplate, deployedOn, deployedBy) - if err != nil { - impl.logger.Errorw("error in creating deployment template history for deployment trigger", "err", err) - return err - } - err = impl.configMapHistoryService.CreateCMCSHistoryForDeploymentTrigger(pipeline, deployedOn, deployedBy) - if err != nil { - impl.logger.Errorw("error in creating CM/CS history for deployment trigger", "err", err) - return err - } - if strategy != nil { - err = impl.pipelineStrategyHistoryService.CreateStrategyHistoryForDeploymentTrigger(strategy, deployedOn, deployedBy, pipeline.TriggerType) - if err != nil { - impl.logger.Errorw("error in creating strategy history for deployment trigger", "err", err) - return err - } - } - //VARIABLE_SNAPSHOT_SAVE - if envOverride.VariableSnapshot != nil && len(envOverride.VariableSnapshot) > 0 { - variableMapBytes, _ := json.Marshal(envOverride.VariableSnapshot) - variableSnapshotHistory := &repository6.VariableSnapshotHistoryBean{ - VariableSnapshot: variableMapBytes, - HistoryReference: repository6.HistoryReference{ - 
HistoryReferenceId: deploymentTemplateHistory.Id, - HistoryReferenceType: repository6.HistoryReferenceTypeDeploymentTemplate, - }, - } - err = impl.variableSnapshotHistoryService.SaveVariableHistoriesForTrigger([]*repository6.VariableSnapshotHistoryBean{variableSnapshotHistory}, deployedBy) - if err != nil { - return err - } - } - return nil -} - -func (impl *AppServiceImpl) updatePipeline(pipeline *pipelineConfig.Pipeline, userId int32) (bool, error) { - err := impl.pipelineRepository.SetDeploymentAppCreatedInPipeline(true, pipeline.Id, userId) - if err != nil { - impl.logger.Errorw("error on updating cd pipeline for setting deployment app created", "err", err) - return false, err - } - return true, nil -} - -func (impl *AppServiceImpl) createHelmAppForCdPipeline(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *ValuesOverrideResponse, triggeredAt time.Time, ctx context.Context) (bool, error) { - - pipeline := valuesOverrideResponse.Pipeline - envOverride := valuesOverrideResponse.EnvOverride - mergeAndSave := valuesOverrideResponse.MergedValues - - chartMetaData := &chart2.Metadata{ - Name: pipeline.App.AppName, - Version: envOverride.Chart.ChartVersion, - } - referenceTemplatePath := path.Join(string(impl.refChartDir), envOverride.Chart.ReferenceTemplate) - - if IsHelmApp(pipeline.DeploymentAppType) { - referenceChartByte := envOverride.Chart.ReferenceChart - // here updating reference chart into database. 
- if len(envOverride.Chart.ReferenceChart) == 0 { - refChartByte, err := impl.chartTemplateService.GetByteArrayRefChart(chartMetaData, referenceTemplatePath) - if err != nil { - impl.logger.Errorw("ref chart commit error on cd trigger", "err", err, "req", overrideRequest) - return false, err - } - ch := envOverride.Chart - ch.ReferenceChart = refChartByte - ch.UpdatedOn = time.Now() - ch.UpdatedBy = overrideRequest.UserId - err = impl.chartRepository.Update(ch) - if err != nil { - impl.logger.Errorw("chart update error", "err", err, "req", overrideRequest) - return false, err - } - referenceChartByte = refChartByte - } - - releaseName := pipeline.DeploymentAppName - cluster := envOverride.Environment.Cluster - bearerToken := cluster.Config[k8s2.BearerToken] - clusterConfig := &client2.ClusterConfig{ - ClusterName: cluster.ClusterName, - Token: bearerToken, - ApiServerUrl: cluster.ServerUrl, - InsecureSkipTLSVerify: cluster.InsecureSkipTlsVerify, - } - if cluster.InsecureSkipTlsVerify == false { - clusterConfig.KeyData = cluster.Config[k8s2.TlsKey] - clusterConfig.CertData = cluster.Config[k8s2.CertData] - clusterConfig.CaData = cluster.Config[k8s2.CertificateAuthorityData] - } - releaseIdentifier := &client2.ReleaseIdentifier{ - ReleaseName: releaseName, - ReleaseNamespace: envOverride.Namespace, - ClusterConfig: clusterConfig, - } - - if pipeline.DeploymentAppCreated { - req := &client2.UpgradeReleaseRequest{ - ReleaseIdentifier: releaseIdentifier, - ValuesYaml: mergeAndSave, - HistoryMax: impl.helmAppService.GetRevisionHistoryMaxValue(client2.SOURCE_DEVTRON_APP), - ChartContent: &client2.ChartContent{Content: referenceChartByte}, - } - - updateApplicationResponse, err := impl.helmAppClient.UpdateApplication(ctx, req) - - // For cases where helm release was not found but db flag for deployment app created was true - if err != nil && strings.Contains(err.Error(), "release: not found") { - - // retry install - _, err = impl.helmInstallReleaseWithCustomChart(ctx, 
releaseIdentifier, referenceChartByte, mergeAndSave) - - // if retry failed, return - if err != nil { - impl.logger.Errorw("release not found, failed to re-install helm application", "err", err) - return false, err - } - } else if err != nil { - impl.logger.Errorw("error in updating helm application for cd pipeline", "err", err) - return false, err - } else { - impl.logger.Debugw("updated helm application", "response", updateApplicationResponse, "isSuccess", updateApplicationResponse.Success) - } - - } else { - - helmResponse, err := impl.helmInstallReleaseWithCustomChart(ctx, releaseIdentifier, referenceChartByte, mergeAndSave) - - // For connection related errors, no need to update the db - if err != nil && strings.Contains(err.Error(), "connection error") { - impl.logger.Errorw("error in helm install custom chart", "err", err) - return false, err - } - - // IMP: update cd pipeline to mark deployment app created, even if helm install fails - // If the helm install fails, it still creates the app in failed state, so trying to - // re-create the app results in error from helm that cannot re-use name which is still in use - _, pgErr := impl.updatePipeline(pipeline, overrideRequest.UserId) - - if err != nil { - impl.logger.Errorw("error in helm install custom chart", "err", err) - - if pgErr != nil { - impl.logger.Errorw("failed to update deployment app created flag in pipeline table", "err", err) - } - return false, err - } - - if pgErr != nil { - impl.logger.Errorw("failed to update deployment app created flag in pipeline table", "err", err) - return false, err - } - - impl.logger.Debugw("received helm release response", "helmResponse", helmResponse, "isSuccess", helmResponse.Success) - } - - //update workflow runner status, used in app workflow view - cdWf, err := impl.cdWorkflowRepository.FindByWorkflowIdAndRunnerType(ctx, overrideRequest.CdWorkflowId, bean.CD_WORKFLOW_TYPE_DEPLOY) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("err on fetching cd 
workflow", "err", err) - return false, err - } - cdWorkflowId := cdWf.CdWorkflowId - if cdWf.CdWorkflowId == 0 { - cdWf := &pipelineConfig.CdWorkflow{ - CiArtifactId: overrideRequest.CiArtifactId, - PipelineId: overrideRequest.PipelineId, - AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, - } - err := impl.cdWorkflowRepository.SaveWorkFlow(ctx, cdWf) - if err != nil { - impl.logger.Errorw("err on updating cd workflow for status update", "err", err) - return false, err - } - cdWorkflowId = cdWf.Id - runner := &pipelineConfig.CdWorkflowRunner{ - Id: cdWf.Id, - Name: pipeline.Name, - WorkflowType: bean.CD_WORKFLOW_TYPE_DEPLOY, - ExecutorType: pipelineConfig.WORKFLOW_EXECUTOR_TYPE_AWF, - Status: pipelineConfig.WorkflowInProgress, - TriggeredBy: overrideRequest.UserId, - StartedOn: triggeredAt, - CdWorkflowId: cdWorkflowId, - AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, - } - _, err = impl.cdWorkflowRepository.SaveWorkFlowRunner(runner) - if err != nil { - impl.logger.Errorw("err on updating cd workflow runner for status update", "err", err) - return false, err - } - } else { - cdWf.Status = pipelineConfig.WorkflowInProgress - cdWf.FinishedOn = time.Now() - cdWf.UpdatedBy = overrideRequest.UserId - cdWf.UpdatedOn = time.Now() - err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(&cdWf) - if err != nil { - impl.logger.Errorw("error on update cd workflow runner", "cdWf", cdWf, "err", err) - return false, err - } - } - } - return true, nil -} - -// helmInstallReleaseWithCustomChart performs helm install with custom chart -func (impl *AppServiceImpl) helmInstallReleaseWithCustomChart(ctx context.Context, releaseIdentifier *client2.ReleaseIdentifier, referenceChartByte []byte, valuesYaml string) (*client2.HelmInstallCustomResponse, error) { - - helmInstallRequest := 
client2.HelmInstallCustomRequest{ - ValuesYaml: valuesYaml, - ChartContent: &client2.ChartContent{Content: referenceChartByte}, - ReleaseIdentifier: releaseIdentifier, - } - - // Request exec - return impl.helmAppClient.InstallReleaseWithCustomChart(ctx, &helmInstallRequest) -} - func (impl *AppServiceImpl) GetGitOpsRepoPrefix() string { return impl.globalEnvVariables.GitOpsRepoPrefix } diff --git a/pkg/appClone/AppCloneService.go b/pkg/appClone/AppCloneService.go index b7dd35f1b6..73b8996f61 100644 --- a/pkg/appClone/AppCloneService.go +++ b/pkg/appClone/AppCloneService.go @@ -53,6 +53,9 @@ type AppCloneServiceImpl struct { pipelineStageService pipeline.PipelineStageService ciTemplateService pipeline.CiTemplateService appRepository app2.AppRepository + ciPipelineRepository pipelineConfig.CiPipelineRepository + pipelineRepository pipelineConfig.PipelineRepository + appWorkflowRepository appWorkflow2.AppWorkflowRepository } func NewAppCloneServiceImpl(logger *zap.SugaredLogger, @@ -65,7 +68,8 @@ func NewAppCloneServiceImpl(logger *zap.SugaredLogger, propertiesConfigService pipeline.PropertiesConfigService, ciTemplateOverrideRepository pipelineConfig.CiTemplateOverrideRepository, pipelineStageService pipeline.PipelineStageService, ciTemplateService pipeline.CiTemplateService, - appRepository app2.AppRepository) *AppCloneServiceImpl { + appRepository app2.AppRepository, ciPipelineRepository pipelineConfig.CiPipelineRepository, + pipelineRepository pipelineConfig.PipelineRepository, appWorkflowRepository appWorkflow2.AppWorkflowRepository) *AppCloneServiceImpl { return &AppCloneServiceImpl{ logger: logger, pipelineBuilder: pipelineBuilder, @@ -78,6 +82,9 @@ func NewAppCloneServiceImpl(logger *zap.SugaredLogger, pipelineStageService: pipelineStageService, ciTemplateService: ciTemplateService, appRepository: appRepository, + ciPipelineRepository: ciPipelineRepository, + pipelineRepository: pipelineRepository, + appWorkflowRepository: appWorkflowRepository, } } @@ -90,6 
+97,15 @@ type CloneRequest struct { AppType helper.AppType `json:"appType"` } +type CreateWorkflowMappingDto struct { + oldAppId int + newAppId int + userId int32 + newWfId int + gitMaterialMapping map[int]int + externalCiPipelineId int +} + func (impl *AppCloneServiceImpl) CloneApp(createReq *bean.CreateAppDTO, context context.Context) (*bean.CreateAppDTO, error) { //validate template app templateApp, err := impl.appRepository.FindById(createReq.TemplateId) @@ -122,18 +138,7 @@ func (impl *AppCloneServiceImpl) CloneApp(createReq *bean.CreateAppDTO, context if err != nil { return nil, err } - refApp, err := impl.pipelineBuilder.GetApp(cloneReq.RefAppId) - if err != nil { - return nil, err - } - isSameProject := refApp.TeamId == cloneReq.ProjectId - /* appStageStatus = append(appStageStatus, impl.makeAppStageStatus(0, "APP", stages.AppId)) - appStageStatus = append(appStageStatus, impl.makeAppStageStatus(1, "MATERIAL", materialExists)) - appStageStatus = append(appStageStatus, impl.makeAppStageStatus(2, "TEMPLATE", stages.CiTemplateId)) - appStageStatus = append(appStageStatus, impl.makeAppStageStatus(3, "CI_PIPELINE", stages.CiPipelineId)) - appStageStatus = append(appStageStatus, impl.makeAppStageStatus(4, "CHART", stages.ChartId)) - appStageStatus = append(appStageStatus, impl.makeAppStageStatus(5, "CD_PIPELINE", stages.PipelineId)) - */ + refAppStatus := make(map[string]bool) for _, as := range appStatus { refAppStatus[as.StageName] = as.Status @@ -193,32 +198,31 @@ func (impl *AppCloneServiceImpl) CloneApp(createReq *bean.CreateAppDTO, context impl.logger.Errorw("error in creating global secret", "ref", cloneReq.RefAppId, "new", newAppId, "err", err) return nil, err } - if isSameProject { - if createReq.AppType != helper.Job { - _, err = impl.CreateEnvCm(context, cloneReq.RefAppId, newAppId, userId) - if err != nil { - impl.logger.Errorw("error in creating env cm", "err", err) - return nil, err - } - _, err = impl.CreateEnvSecret(context, cloneReq.RefAppId, 
newAppId, userId) - if err != nil { - impl.logger.Errorw("error in creating env secret", "err", err) - return nil, err - } - _, err = impl.createEnvOverride(cloneReq.RefAppId, newAppId, userId, context) - if err != nil { - impl.logger.Errorw("error in cloning env override", "err", err) - return nil, err - } - } else { - _, err := impl.configMapService.ConfigSecretEnvironmentClone(cloneReq.RefAppId, newAppId, userId) - if err != nil { - impl.logger.Errorw("error in cloning cm cs env override", "err", err) - return nil, err - } + + if createReq.AppType != helper.Job { + _, err = impl.CreateEnvCm(context, cloneReq.RefAppId, newAppId, userId) + if err != nil { + impl.logger.Errorw("error in creating env cm", "err", err) + return nil, err + } + _, err = impl.CreateEnvSecret(context, cloneReq.RefAppId, newAppId, userId) + if err != nil { + impl.logger.Errorw("error in creating env secret", "err", err) + return nil, err + } + _, err = impl.createEnvOverride(cloneReq.RefAppId, newAppId, userId, context) + if err != nil { + impl.logger.Errorw("error in cloning env override", "err", err) + return nil, err + } + } else { + _, err := impl.configMapService.ConfigSecretEnvironmentClone(cloneReq.RefAppId, newAppId, userId) + if err != nil { + impl.logger.Errorw("error in cloning cm cs env override", "err", err) + return nil, err } } - _, err = impl.CreateWf(cloneReq.RefAppId, newAppId, userId, gitMaerialMap, context, isSameProject) + _, err = impl.CreateWf(cloneReq.RefAppId, newAppId, userId, gitMaerialMap, context) if err != nil { impl.logger.Errorw("error in creating wf", "ref", cloneReq.RefAppId, "new", newAppId, "err", err) return nil, err @@ -312,6 +316,7 @@ func (impl *AppCloneServiceImpl) CreateCiTemplate(oldAppId, newAppId int, userId UserId: userId, BeforeDockerBuild: refCiConf.BeforeDockerBuild, AfterDockerBuild: refCiConf.AfterDockerBuild, + ScanEnabled: refCiConf.ScanEnabled, } res, err := impl.pipelineBuilder.CreateCiPipeline(ciConfRequest) @@ -580,12 +585,13 @@ func 
(impl *AppCloneServiceImpl) CreateGlobalSecret(oldAppId, newAppId int, user return thisCm, err } -func (impl *AppCloneServiceImpl) CreateWf(oldAppId, newAppId int, userId int32, gitMaterialMapping map[int]int, ctx context.Context, isSameProject bool) (interface{}, error) { +func (impl *AppCloneServiceImpl) CreateWf(oldAppId, newAppId int, userId int32, gitMaterialMapping map[int]int, ctx context.Context) (interface{}, error) { refAppWFs, err := impl.appWorkflowService.FindAppWorkflows(oldAppId) if err != nil { return nil, err } impl.logger.Debugw("workflow found", "wf", refAppWFs) + for _, refAppWF := range refAppWFs { thisWf := appWorkflow.AppWorkflowDto{ Id: 0, @@ -594,24 +600,37 @@ func (impl *AppCloneServiceImpl) CreateWf(oldAppId, newAppId int, userId int32, AppWorkflowMappingDto: nil, //first create new mapping then add it UserId: userId, } + thisWf, err = impl.appWorkflowService.CreateAppWorkflow(thisWf) + if err != nil { + impl.logger.Errorw("error in creating workflow without external-ci", "err", err) + return nil, err + } isExternalCiPresent := false for _, awm := range refAppWF.AppWorkflowMappingDto { if awm.Type == appWorkflow2.WEBHOOK { isExternalCiPresent = true + break } } - - if !isExternalCiPresent { - thisWf, err = impl.appWorkflowService.CreateAppWorkflow(thisWf) - impl.logger.Debugw("workflow found", thisWf) + createWorkflowMappingDto := CreateWorkflowMappingDto{ + newAppId: newAppId, + oldAppId: oldAppId, + newWfId: thisWf.Id, + userId: userId, + } + var externalCiPipelineId int + if isExternalCiPresent { + externalCiPipelineId, err = impl.createExternalCiAndAppWorkflowMapping(createWorkflowMappingDto) if err != nil { - impl.logger.Errorw("errir in creating workflow without extenal-ci", "err", err) + impl.logger.Errorw("error in createExternalCiAndAppWorkflowMapping", "err", err) return nil, err } } + createWorkflowMappingDto.gitMaterialMapping = gitMaterialMapping + createWorkflowMappingDto.externalCiPipelineId = externalCiPipelineId - err = 
impl.createWfMappings(refAppWF.AppWorkflowMappingDto, oldAppId, newAppId, userId, thisWf.Id, gitMaterialMapping, ctx, isSameProject) + err = impl.createWfInstances(refAppWF.AppWorkflowMappingDto, createWorkflowMappingDto, ctx) if err != nil { impl.logger.Errorw("error in creating workflow mapping", "err", err) return nil, err @@ -620,7 +639,28 @@ func (impl *AppCloneServiceImpl) CreateWf(oldAppId, newAppId int, userId int32, return nil, nil } -func (impl *AppCloneServiceImpl) createWfMappings(refWfMappings []appWorkflow.AppWorkflowMappingDto, oldAppId, newAppId int, userId int32, thisWfId int, gitMaterialMapping map[int]int, ctx context.Context, isSameProject bool) error { +func (impl *AppCloneServiceImpl) createExternalCiAndAppWorkflowMapping(createWorkflowMappingDto CreateWorkflowMappingDto) (int, error) { + dbConnection := impl.pipelineRepository.GetConnection() + tx, err := dbConnection.Begin() + if err != nil { + impl.logger.Errorw("error in beginning transaction", "err", err) + return 0, err + } + // Rollback tx on error. 
+ defer tx.Rollback() + externalCiPipelineId, err := impl.pipelineBuilder.CreateExternalCiAndAppWorkflowMapping(createWorkflowMappingDto.newAppId, createWorkflowMappingDto.newWfId, createWorkflowMappingDto.userId, tx) + if err != nil { + impl.logger.Errorw("error in creating new external ci pipeline and new app workflow mapping", "refAppId", createWorkflowMappingDto.oldAppId, "newAppId", createWorkflowMappingDto.newAppId, "err", err) + return 0, err + } + err = tx.Commit() + if err != nil { + return 0, err + } + return externalCiPipelineId, nil +} + +func (impl *AppCloneServiceImpl) createWfInstances(refWfMappings []appWorkflow.AppWorkflowMappingDto, createWorkflowMappingDto CreateWorkflowMappingDto, ctx context.Context) error { impl.logger.Debugw("wf mapping cloning", "refWfMappings", refWfMappings) var ciMapping []appWorkflow.AppWorkflowMappingDto var cdMappings []appWorkflow.AppWorkflowMappingDto @@ -636,28 +676,31 @@ func (impl *AppCloneServiceImpl) createWfMappings(refWfMappings []appWorkflow.Ap return fmt.Errorf("unsupported wf type: %s", appWf.Type) } } - refApp, err := impl.pipelineBuilder.GetApp(oldAppId) + sourceToNewPipelineIdMapping := make(map[int]int) + refApp, err := impl.pipelineBuilder.GetApp(createWorkflowMappingDto.oldAppId) + if err != nil { + impl.logger.Errorw("error in getting app from refAppId", "refAppId", createWorkflowMappingDto.oldAppId) + return err + } if len(webhookMappings) > 0 { - if isSameProject { - for _, refwebhookMappings := range cdMappings { - cdCloneReq := &cloneCdPipelineRequest{ - refCdPipelineId: refwebhookMappings.ComponentId, - refAppId: oldAppId, - appId: newAppId, - userId: userId, - ciPipelineId: 0, - appWfId: thisWfId, - refAppName: refApp.AppName, - } - pipeline, err := impl.CreateCdPipeline(cdCloneReq, ctx) - impl.logger.Debugw("cd pipeline created", "pipeline", pipeline) - if err != nil { - impl.logger.Errorw("error in getting cd-pipeling", "err", err) - return err - } + for _, refwebhookMappings := range 
cdMappings { + cdCloneReq := &cloneCdPipelineRequest{ + refCdPipelineId: refwebhookMappings.ComponentId, + refAppId: createWorkflowMappingDto.oldAppId, + appId: createWorkflowMappingDto.newAppId, + userId: createWorkflowMappingDto.userId, + ciPipelineId: 0, + appWfId: createWorkflowMappingDto.newWfId, + refAppName: refApp.AppName, + sourceToNewPipelineId: sourceToNewPipelineIdMapping, + externalCiPipelineId: createWorkflowMappingDto.externalCiPipelineId, + } + pipeline, err := impl.CreateCdPipeline(cdCloneReq, ctx) + impl.logger.Debugw("cd pipeline created", "pipeline", pipeline) + if err != nil { + impl.logger.Errorw("error in getting cd-pipeline", "refAppId", createWorkflowMappingDto.oldAppId, "newAppId", createWorkflowMappingDto.newAppId, "err", err) + return err } - } else { - impl.logger.Debug("not the same project, skipping cd pipeline creation") } return nil } @@ -666,7 +709,7 @@ func (impl *AppCloneServiceImpl) createWfMappings(refWfMappings []appWorkflow.Ap impl.logger.Warn("no ci pipeline found") return nil } else if len(ciMapping) != 1 { - impl.logger.Warn("more than one cd pipeline not supported") + impl.logger.Warn("more than one ci pipeline not supported") return nil } @@ -678,12 +721,12 @@ func (impl *AppCloneServiceImpl) createWfMappings(refWfMappings []appWorkflow.Ap impl.logger.Debugw("creating ci", "ref", refCiMapping) cloneCiPipelineRequest := &cloneCiPipelineRequest{ - refAppId: oldAppId, + refAppId: createWorkflowMappingDto.oldAppId, refCiPipelineId: refCiMapping.ComponentId, - userId: userId, - appId: newAppId, - wfId: thisWfId, - gitMaterialMapping: gitMaterialMapping, + userId: createWorkflowMappingDto.userId, + appId: createWorkflowMappingDto.newAppId, + wfId: createWorkflowMappingDto.newWfId, + gitMaterialMapping: createWorkflowMappingDto.gitMaterialMapping, refAppName: refApp.AppName, } ci, err = impl.CreateCiPipeline(cloneCiPipelineRequest) @@ -693,26 +736,24 @@ func (impl *AppCloneServiceImpl) createWfMappings(refWfMappings 
[]appWorkflow.Ap } impl.logger.Debugw("ci created", "ci", ci) } - if isSameProject { - for _, refCdMapping := range cdMappings { - cdCloneReq := &cloneCdPipelineRequest{ - refCdPipelineId: refCdMapping.ComponentId, - refAppId: oldAppId, - appId: newAppId, - userId: userId, - ciPipelineId: ci.CiPipelines[0].Id, - appWfId: thisWfId, - refAppName: refApp.AppName, - } - pipeline, err := impl.CreateCdPipeline(cdCloneReq, ctx) - if err != nil { - impl.logger.Errorw("error in creating cd pipeline, app clone", "err", err) - return err - } - impl.logger.Debugw("cd pipeline created", "pipeline", pipeline) + + for _, refCdMapping := range cdMappings { + cdCloneReq := &cloneCdPipelineRequest{ + refCdPipelineId: refCdMapping.ComponentId, + refAppId: createWorkflowMappingDto.oldAppId, + appId: createWorkflowMappingDto.newAppId, + userId: createWorkflowMappingDto.userId, + ciPipelineId: ci.CiPipelines[0].Id, + appWfId: createWorkflowMappingDto.newWfId, + refAppName: refApp.AppName, + sourceToNewPipelineId: sourceToNewPipelineIdMapping, } - } else { - impl.logger.Debug("not the same project, skipping cd pipeline creation") + pipeline, err := impl.CreateCdPipeline(cdCloneReq, ctx) + if err != nil { + impl.logger.Errorw("error in creating cd pipeline, app clone", "err", err) + return err + } + impl.logger.Debugw("cd pipeline created", "pipeline", pipeline) } //find ci @@ -813,6 +854,7 @@ func (impl *AppCloneServiceImpl) CreateCiPipeline(req *cloneCiPipelineRequest) ( PreBuildStage: preStageDetail, PostBuildStage: postStageDetail, EnvironmentId: refCiPipeline.EnvironmentId, + ScanEnabled: refCiPipeline.ScanEnabled, PipelineType: refCiPipeline.PipelineType, }, AppId: req.appId, @@ -867,13 +909,15 @@ func (impl *AppCloneServiceImpl) CreateCiPipeline(req *cloneCiPipelineRequest) ( } type cloneCdPipelineRequest struct { - refCdPipelineId int - refAppId int - appId int - userId int32 - ciPipelineId int - appWfId int - refAppName string + refCdPipelineId int + refAppId int + appId int + 
userId int32 + ciPipelineId int + appWfId int + refAppName string + sourceToNewPipelineId map[int]int + externalCiPipelineId int } func (impl *AppCloneServiceImpl) CreateCdPipeline(req *cloneCdPipelineRequest, ctx context.Context) (*bean.CdPipelines, error) { @@ -891,6 +935,7 @@ func (impl *AppCloneServiceImpl) CreateCdPipeline(req *cloneCdPipelineRequest, c if refCdPipeline == nil { return nil, fmt.Errorf("no cd pipeline found") } + refCdPipeline.SourceToNewPipelineId = req.sourceToNewPipelineId pipelineName := refCdPipeline.Name if strings.HasPrefix(pipelineName, req.refAppName) { pipelineName = strings.Replace(pipelineName, req.refAppName+"-", "", 1) @@ -921,35 +966,6 @@ func (impl *AppCloneServiceImpl) CreateCdPipeline(req *cloneCdPipelineRequest, c deploymentAppType = util.PIPELINE_DEPLOYMENT_TYPE_HELM } - if refCdPipeline.ParentPipelineType == "WEBHOOK" { - cdPipeline := &bean.CDPipelineConfigObject{ - Id: 0, - EnvironmentId: refCdPipeline.EnvironmentId, - CiPipelineId: 0, - TriggerType: refCdPipeline.TriggerType, - Name: pipelineName, - Strategies: refCdPipeline.Strategies, - Namespace: refCdPipeline.Namespace, - AppWorkflowId: 0, - DeploymentTemplate: refCdPipeline.DeploymentTemplate, - PreStage: refCdPipeline.PreStage, //FIXME - PostStage: refCdPipeline.PostStage, - PreStageConfigMapSecretNames: refCdPipeline.PreStageConfigMapSecretNames, - PostStageConfigMapSecretNames: refCdPipeline.PostStageConfigMapSecretNames, - RunPostStageInEnv: refCdPipeline.RunPostStageInEnv, - RunPreStageInEnv: refCdPipeline.RunPreStageInEnv, - DeploymentAppType: refCdPipeline.DeploymentAppType, - ParentPipelineId: 0, - ParentPipelineType: refCdPipeline.ParentPipelineType, - } - cdPipelineReq := &bean.CdPipelines{ - Pipelines: []*bean.CDPipelineConfigObject{cdPipeline}, - AppId: req.appId, - UserId: req.userId, - } - cdPipelineRes, err := impl.pipelineBuilder.CreateCdPipelines(cdPipelineReq, ctx) - return cdPipelineRes, err - } cdPipeline := &bean.CDPipelineConfigObject{ Id: 0, 
EnvironmentId: refCdPipeline.EnvironmentId, @@ -969,6 +985,15 @@ func (impl *AppCloneServiceImpl) CreateCdPipeline(req *cloneCdPipelineRequest, c DeploymentAppType: deploymentAppType, PreDeployStage: refCdPipeline.PreDeployStage, PostDeployStage: refCdPipeline.PostDeployStage, + SourceToNewPipelineId: refCdPipeline.SourceToNewPipelineId, + RefPipelineId: refCdPipeline.Id, + ParentPipelineType: refCdPipeline.ParentPipelineType, + } + if refCdPipeline.ParentPipelineType == "WEBHOOK" { + cdPipeline.CiPipelineId = 0 + cdPipeline.ParentPipelineId = req.externalCiPipelineId + } else if refCdPipeline.ParentPipelineType != appWorkflow.CI_PIPELINE_TYPE { + cdPipeline.ParentPipelineId = refCdPipeline.ParentPipelineId } cdPipelineReq := &bean.CdPipelines{ Pipelines: []*bean.CDPipelineConfigObject{cdPipeline}, diff --git a/pkg/appStore/deployment/fullMode/AppStoreDeploymentFullModeService.go b/pkg/appStore/deployment/fullMode/AppStoreDeploymentFullModeService.go index 8aa2e56db9..3673437ef2 100644 --- a/pkg/appStore/deployment/fullMode/AppStoreDeploymentFullModeService.go +++ b/pkg/appStore/deployment/fullMode/AppStoreDeploymentFullModeService.go @@ -86,6 +86,7 @@ type AppStoreDeploymentFullModeServiceImpl struct { gitOpsConfigRepository repository3.GitOpsConfigRepository pipelineStatusTimelineService status.PipelineStatusTimelineService appStoreDeploymentCommonService appStoreDeploymentCommon.AppStoreDeploymentCommonService + argoClientWrapperService argocdServer.ArgoClientWrapperService } func NewAppStoreDeploymentFullModeServiceImpl(logger *zap.SugaredLogger, @@ -101,6 +102,7 @@ func NewAppStoreDeploymentFullModeServiceImpl(logger *zap.SugaredLogger, argoUserService argo.ArgoUserService, gitOpsConfigRepository repository3.GitOpsConfigRepository, pipelineStatusTimelineService status.PipelineStatusTimelineService, appStoreDeploymentCommonService appStoreDeploymentCommon.AppStoreDeploymentCommonService, + argoClientWrapperService argocdServer.ArgoClientWrapperService, ) 
*AppStoreDeploymentFullModeServiceImpl { return &AppStoreDeploymentFullModeServiceImpl{ logger: logger, @@ -120,6 +122,7 @@ func NewAppStoreDeploymentFullModeServiceImpl(logger *zap.SugaredLogger, gitOpsConfigRepository: gitOpsConfigRepository, pipelineStatusTimelineService: pipelineStatusTimelineService, appStoreDeploymentCommonService: appStoreDeploymentCommonService, + argoClientWrapperService: argoClientWrapperService, } } @@ -316,6 +319,12 @@ func (impl AppStoreDeploymentFullModeServiceImpl) AppStoreDeployOperationACD(ins //STEP 6: Force Sync ACD - works like trigger deployment //impl.SyncACD(installAppVersionRequest.ACDAppName, ctx) + //STEP 7: normal refresh ACD - update for step 6 to avoid delay + err = impl.argoClientWrapperService.GetArgoAppWithNormalRefresh(ctx, installAppVersionRequest.ACDAppName) + if err != nil { + impl.logger.Errorw("error in getting the argo application with normal refresh", "err", err) + } + return installAppVersionRequest, nil } diff --git a/pkg/appStore/deployment/service/AppStoreDeploymentService.go b/pkg/appStore/deployment/service/AppStoreDeploymentService.go index 99b0788d93..8c73baf6a9 100644 --- a/pkg/appStore/deployment/service/AppStoreDeploymentService.go +++ b/pkg/appStore/deployment/service/AppStoreDeploymentService.go @@ -91,6 +91,7 @@ func GetDeploymentServiceTypeConfig() (*DeploymentServiceTypeConfig, error) { type AppStoreDeploymentServiceImpl struct { logger *zap.SugaredLogger installedAppRepository repository.InstalledAppRepository + chartGroupDeploymentRepository repository.ChartGroupDeploymentRepository appStoreApplicationVersionRepository appStoreDiscoverRepository.AppStoreApplicationVersionRepository environmentRepository clusterRepository.EnvironmentRepository clusterInstalledAppsRepository repository.ClusterInstalledAppsRepository @@ -110,7 +111,7 @@ type AppStoreDeploymentServiceImpl struct { } func NewAppStoreDeploymentServiceImpl(logger *zap.SugaredLogger, installedAppRepository 
repository.InstalledAppRepository, - appStoreApplicationVersionRepository appStoreDiscoverRepository.AppStoreApplicationVersionRepository, environmentRepository clusterRepository.EnvironmentRepository, + chartGroupDeploymentRepository repository.ChartGroupDeploymentRepository, appStoreApplicationVersionRepository appStoreDiscoverRepository.AppStoreApplicationVersionRepository, environmentRepository clusterRepository.EnvironmentRepository, clusterInstalledAppsRepository repository.ClusterInstalledAppsRepository, appRepository app.AppRepository, appStoreDeploymentHelmService appStoreDeploymentTool.AppStoreDeploymentHelmService, appStoreDeploymentArgoCdService appStoreDeploymentGitopsTool.AppStoreDeploymentArgoCdService, environmentService cluster.EnvironmentService, @@ -122,6 +123,7 @@ func NewAppStoreDeploymentServiceImpl(logger *zap.SugaredLogger, installedAppRep appStoreDeploymentServiceImpl := &AppStoreDeploymentServiceImpl{ logger: logger, installedAppRepository: installedAppRepository, + chartGroupDeploymentRepository: chartGroupDeploymentRepository, appStoreApplicationVersionRepository: appStoreApplicationVersionRepository, environmentRepository: environmentRepository, clusterInstalledAppsRepository: clusterInstalledAppsRepository, @@ -765,6 +767,21 @@ func (impl AppStoreDeploymentServiceImpl) DeleteInstalledApp(ctx context.Context } } + // soft delete chart-group deployment + chartGroupDeployment, err := impl.chartGroupDeploymentRepository.FindByInstalledAppId(model.Id) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error while fetching chart group deployment", "error", err) + return nil, err + } + if chartGroupDeployment.Id != 0 { + chartGroupDeployment.Deleted = true + _, err = impl.chartGroupDeploymentRepository.Update(chartGroupDeployment, tx) + if err != nil { + impl.logger.Errorw("error while updating chart group deployment", "error", err) + return nil, err + } + } + if util2.IsBaseStack() || util2.IsHelmApp(app.AppOfferingMode) || 
util.IsHelmApp(model.DeploymentAppType) { // there might be a case if helm release gets uninstalled from helm cli. //in this case on deleting the app from API, it should not give error as it should get deleted from db, otherwise due to delete error, db does not get clean @@ -1470,13 +1487,11 @@ func (impl *AppStoreDeploymentServiceImpl) UpdateInstalledApp(ctx context.Contex } if installAppVersionRequest.PerformACDDeployment { - if monoRepoMigrationRequired { - // update repo details on ArgoCD as repo is changed - err = impl.appStoreDeploymentArgoCdService.UpdateChartInfo(installAppVersionRequest, gitOpsResponse.ChartGitAttribute, 0, ctx) - if err != nil { - impl.logger.Errorw("error in acd patch request", "err", err) - return nil, err - } + // refresh update repo details on ArgoCD if repo is changed + err = impl.appStoreDeploymentArgoCdService.RefreshAndUpdateACDApp(installAppVersionRequest, gitOpsResponse.ChartGitAttribute, monoRepoMigrationRequired, ctx) + if err != nil { + impl.logger.Errorw("error in acd patch request", "err", err) + return nil, err } } else if installAppVersionRequest.PerformHelmDeployment { err = impl.appStoreDeploymentHelmService.UpdateChartInfo(installAppVersionRequest, gitOpsResponse.ChartGitAttribute, installAppVersionRequest.InstalledAppVersionHistoryId, ctx) diff --git a/pkg/appStore/deployment/tool/AppStoreDeploymentHelmService.go b/pkg/appStore/deployment/tool/AppStoreDeploymentHelmService.go index f5a99a1ec7..38371338c2 100644 --- a/pkg/appStore/deployment/tool/AppStoreDeploymentHelmService.go +++ b/pkg/appStore/deployment/tool/AppStoreDeploymentHelmService.go @@ -473,3 +473,10 @@ func (impl *AppStoreDeploymentHelmServiceImpl) SaveTimelineForACDHelmApps(instal func (impl *AppStoreDeploymentHelmServiceImpl) UpdateInstalledAppAndPipelineStatusForFailedDeploymentStatus(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, triggeredAt time.Time, err error) error { return nil } + +// TODO: Need to refactor this,refer below reason 
+// This is being done as in ea mode wire argocd service is being binded to helmServiceImpl due to which we are restricted to implement this here. +// RefreshAndUpdateACDApp this will update chart info in acd app if required in case of mono repo migration and will refresh argo app +func (impl *AppStoreDeploymentHelmServiceImpl) RefreshAndUpdateACDApp(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, ChartGitAttribute *util.ChartGitAttribute, isMonoRepoMigrationRequired bool, ctx context.Context) error { + return errors.New("this is not implemented") +} diff --git a/pkg/appStore/deployment/tool/gitops/AppStoreDeploymentArgoCdService.go b/pkg/appStore/deployment/tool/gitops/AppStoreDeploymentArgoCdService.go index 9f9f9b049e..d0492eed88 100644 --- a/pkg/appStore/deployment/tool/gitops/AppStoreDeploymentArgoCdService.go +++ b/pkg/appStore/deployment/tool/gitops/AppStoreDeploymentArgoCdService.go @@ -11,6 +11,7 @@ import ( client "github.com/devtron-labs/devtron/api/helm-app" openapi "github.com/devtron-labs/devtron/api/helm-app/openapiClient" openapi2 "github.com/devtron-labs/devtron/api/openapi/openapiClient" + "github.com/devtron-labs/devtron/client/argocdServer" application2 "github.com/devtron-labs/devtron/client/argocdServer/application" "github.com/devtron-labs/devtron/internal/constants" repository3 "github.com/devtron-labs/devtron/internal/sql/repository" @@ -55,6 +56,7 @@ type AppStoreDeploymentArgoCdService interface { UpdateInstalledAppAndPipelineStatusForFailedDeploymentStatus(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, triggeredAt time.Time, err error) error SaveTimelineForACDHelmApps(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, status string, statusDetail string, tx *pg.Tx) error UpdateChartInfo(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, ChartGitAttribute 
*util.ChartGitAttribute, installedAppVersionHistoryId int, ctx context.Context) error + RefreshAndUpdateACDApp(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, ChartGitAttribute *util.ChartGitAttribute, isMonoRepoMigrationRequired bool, ctx context.Context) error } type AppStoreDeploymentArgoCdServiceImpl struct { @@ -75,6 +77,7 @@ type AppStoreDeploymentArgoCdServiceImpl struct { userService user.UserService pipelineStatusTimelineRepository pipelineConfig.PipelineStatusTimelineRepository appStoreApplicationVersionRepository appStoreDiscoverRepository.AppStoreApplicationVersionRepository + argoClientWrapperService argocdServer.ArgoClientWrapperService } func NewAppStoreDeploymentArgoCdServiceImpl(logger *zap.SugaredLogger, appStoreDeploymentFullModeService appStoreDeploymentFullMode.AppStoreDeploymentFullModeService, @@ -85,7 +88,7 @@ func NewAppStoreDeploymentArgoCdServiceImpl(logger *zap.SugaredLogger, appStoreD pipelineStatusTimelineService status.PipelineStatusTimelineService, userService user.UserService, pipelineStatusTimelineRepository pipelineConfig.PipelineStatusTimelineRepository, appStoreApplicationVersionRepository appStoreDiscoverRepository.AppStoreApplicationVersionRepository, -) *AppStoreDeploymentArgoCdServiceImpl { + argoClientWrapperService argocdServer.ArgoClientWrapperService) *AppStoreDeploymentArgoCdServiceImpl { return &AppStoreDeploymentArgoCdServiceImpl{ Logger: logger, appStoreDeploymentFullModeService: appStoreDeploymentFullModeService, @@ -104,9 +107,28 @@ func NewAppStoreDeploymentArgoCdServiceImpl(logger *zap.SugaredLogger, appStoreD userService: userService, pipelineStatusTimelineRepository: pipelineStatusTimelineRepository, appStoreApplicationVersionRepository: appStoreApplicationVersionRepository, + argoClientWrapperService: argoClientWrapperService, } } +// RefreshAndUpdateACDApp this will update chart info in acd app if required in case of mono repo migration and will refresh argo app +func (impl 
AppStoreDeploymentArgoCdServiceImpl) RefreshAndUpdateACDApp(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, ChartGitAttribute *util.ChartGitAttribute, isMonoRepoMigrationRequired bool, ctx context.Context) error { + if isMonoRepoMigrationRequired { + // update repo details on ArgoCD as repo is changed + err := impl.UpdateChartInfo(installAppVersionRequest, ChartGitAttribute, 0, ctx) + if err != nil { + impl.Logger.Errorw("error in acd patch request", "err", err) + return err + } + } + // Doing this to refresh normally by getting app to avoid sync delay argo cd + err := impl.argoClientWrapperService.GetArgoAppWithNormalRefresh(ctx, installAppVersionRequest.ACDAppName) + if err != nil { + impl.Logger.Errorw("error in getting argocd application with normal refresh", "err", err, "argoAppName", installAppVersionRequest.ACDAppName) + } + return nil +} + // UpdateChartInfo this will update chart info in acd app, needed when repo for an app is changed func (impl AppStoreDeploymentArgoCdServiceImpl) UpdateChartInfo(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, ChartGitAttribute *util.ChartGitAttribute, installedAppVersionHistoryId int, ctx context.Context) error { installAppVersionRequest, err := impl.patchAcdApp(ctx, installAppVersionRequest, ChartGitAttribute) diff --git a/pkg/auth/UserAuthOidcHelper.go b/pkg/auth/UserAuthOidcHelper.go index eefb93e2b1..829f905c1a 100644 --- a/pkg/auth/UserAuthOidcHelper.go +++ b/pkg/auth/UserAuthOidcHelper.go @@ -21,7 +21,7 @@ import ( "github.com/devtron-labs/authenticator/client" authMiddleware "github.com/devtron-labs/authenticator/middleware" "github.com/devtron-labs/authenticator/oidc" - "github.com/devtron-labs/devtron/client/argocdServer" + "github.com/devtron-labs/devtron/client/argocdServer/connection" "github.com/devtron-labs/devtron/pkg/user" "go.uber.org/zap" "net/http" @@ -66,8 +66,8 @@ func 
NewUserAuthOidcHelperImpl(logger *zap.SugaredLogger, selfRegistrationRolesS // SanitiseRedirectUrl replaces initial "/orchestrator" from url func (impl UserAuthOidcHelperImpl) sanitiseRedirectUrl(redirectUrl string) string { - if strings.Contains(redirectUrl, argocdServer.Dashboard) { - redirectUrl = strings.ReplaceAll(redirectUrl, argocdServer.Orchestrator, "") + if strings.Contains(redirectUrl, connection.Dashboard) { + redirectUrl = strings.ReplaceAll(redirectUrl, connection.Orchestrator, "") } return redirectUrl } diff --git a/pkg/bean/app.go b/pkg/bean/app.go index 6fd01e4b6c..8743917f01 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -560,6 +560,9 @@ type CDPipelineConfigObject struct { ManifestStorageType string `json:"manifestStorageType"` PreDeployStage *bean.PipelineStageDto `json:"preDeployStage,omitempty"` PostDeployStage *bean.PipelineStageDto `json:"postDeployStage,omitempty"` + SourceToNewPipelineId map[int]int `json:"sourceToNewPipelineId,omitempty"` + RefPipelineId int `json:"refPipelineId,omitempty"` + ExternalCiPipelineId int `json:"externalCiPipelineId,omitempty"` CustomTagObject *CustomTagData `json:"customTag,omitempty"` CustomTagStage *repository.PipelineStageType `json:"customTagStage,omitempty"` } diff --git a/pkg/chart/ChartService.go b/pkg/chart/ChartService.go index cf426dd8c2..9be1e4872f 100644 --- a/pkg/chart/ChartService.go +++ b/pkg/chart/ChartService.go @@ -24,6 +24,7 @@ import ( "fmt" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/variables" + models2 "github.com/devtron-labs/devtron/pkg/variables/models" "github.com/devtron-labs/devtron/pkg/variables/parsers" repository5 "github.com/devtron-labs/devtron/pkg/variables/repository" @@ -100,7 +101,7 @@ type ChartService interface { FlaggerCanaryEnabled(values json.RawMessage) (bool, error) GetCustomChartInBytes(chatRefId int) ([]byte, error) 
GetRefChart(templateRequest TemplateRequest) (string, string, error, string, string) - ExtractVariablesAndResolveTemplate(scope resourceQualifiers.Scope, template string, templateType parsers.VariableTemplateType) (string, error) + ExtractVariablesAndResolveTemplate(scope resourceQualifiers.Scope, template string, templateType parsers.VariableTemplateType, isSuperAdmin bool, maskUnknownVariable bool) (string, map[string]string, error) } type ChartServiceImpl struct { @@ -1332,30 +1333,46 @@ const cpuPattern = `"50m" or "0.05"` const cpu = "cpu" const memory = "memory" -func (impl ChartServiceImpl) ExtractVariablesAndResolveTemplate(scope resourceQualifiers.Scope, template string, templateType parsers.VariableTemplateType) (string, error) { - +func (impl ChartServiceImpl) ExtractVariablesAndResolveTemplate(scope resourceQualifiers.Scope, template string, templateType parsers.VariableTemplateType, isSuperAdmin bool, maskUnknownVariable bool) (string, map[string]string, error) { + //Todo Subhashish manager layer + variableSnapshot := make(map[string]string) usedVariables, err := impl.variableTemplateParser.ExtractVariables(template, templateType) if err != nil { - return "", err + return template, variableSnapshot, err } if len(usedVariables) == 0 { - return template, nil + return template, variableSnapshot, err } - scopedVariables, err := impl.scopedVariableService.GetScopedVariables(scope, usedVariables, true) + scopedVariables, err := impl.scopedVariableService.GetScopedVariables(scope, usedVariables, isSuperAdmin) if err != nil { - return "", err + return template, variableSnapshot, err + } + + for _, variable := range scopedVariables { + variableSnapshot[variable.VariableName] = variable.VariableValue.StringValue() + } + + if maskUnknownVariable { + for _, variable := range usedVariables { + if _, ok := variableSnapshot[variable]; !ok { + scopedVariables = append(scopedVariables, &models2.ScopedVariableData{ + VariableName: variable, + VariableValue: 
&models2.VariableValue{Value: models2.UndefinedValue}, + }) + } + } } parserRequest := parsers.VariableParserRequest{Template: template, Variables: scopedVariables, TemplateType: templateType, IgnoreUnknownVariables: true} parserResponse := impl.variableTemplateParser.ParseTemplate(parserRequest) err = parserResponse.Error if err != nil { - return "", err + return template, variableSnapshot, err } resolvedTemplate := parserResponse.ResolvedTemplate - return resolvedTemplate, nil + return resolvedTemplate, variableSnapshot, nil } func (impl ChartServiceImpl) DeploymentTemplateValidate(ctx context.Context, template interface{}, chartRefId int, scope resourceQualifiers.Scope) (bool, error) { @@ -1375,7 +1392,7 @@ func (impl ChartServiceImpl) DeploymentTemplateValidate(ctx context.Context, tem //} templateBytes := template.(json.RawMessage) - templatejsonstring, err := impl.ExtractVariablesAndResolveTemplate(scope, string(templateBytes), parsers.JsonVariableTemplate) + templatejsonstring, _, err := impl.ExtractVariablesAndResolveTemplate(scope, string(templateBytes), parsers.JsonVariableTemplate, true, false) if err != nil { return false, err } diff --git a/pkg/cluster/ClusterCronService.go b/pkg/cluster/ClusterCronService.go index d35d91191a..5af2240960 100644 --- a/pkg/cluster/ClusterCronService.go +++ b/pkg/cluster/ClusterCronService.go @@ -46,7 +46,7 @@ func (impl *ClusterCronServiceImpl) GetAndUpdateClusterConnectionStatus() { defer impl.logger.Debug("stopped cluster connection status fetch thread") //getting all clusters - clusters, err := impl.clusterService.FindAll() + clusters, err := impl.clusterService.FindAllExceptVirtual() if err != nil { impl.logger.Errorw("error in getting all clusters", "err", err) return diff --git a/pkg/cluster/ClusterService.go b/pkg/cluster/ClusterService.go index 990772950d..b7aab98214 100644 --- a/pkg/cluster/ClusterService.go +++ b/pkg/cluster/ClusterService.go @@ -159,6 +159,7 @@ type ClusterService interface { 
FindOne(clusterName string) (*ClusterBean, error) FindOneActive(clusterName string) (*ClusterBean, error) FindAll() ([]*ClusterBean, error) + FindAllExceptVirtual() ([]*ClusterBean, error) FindAllWithoutConfig() ([]*ClusterBean, error) FindAllActive() ([]ClusterBean, error) DeleteFromDb(bean *ClusterBean, userId int32) error @@ -356,6 +357,19 @@ func (impl *ClusterServiceImpl) FindAll() ([]*ClusterBean, error) { return beans, nil } +func (impl *ClusterServiceImpl) FindAllExceptVirtual() ([]*ClusterBean, error) { + models, err := impl.clusterRepository.FindAllActiveExceptVirtual() + if err != nil { + return nil, err + } + var beans []*ClusterBean + for _, model := range models { + bean := GetClusterBean(model) + beans = append(beans, &bean) + } + return beans, nil +} + func (impl *ClusterServiceImpl) FindAllActive() ([]ClusterBean, error) { models, err := impl.clusterRepository.FindAllActive() if err != nil { diff --git a/pkg/cluster/ClusterServiceExtended.go b/pkg/cluster/ClusterServiceExtended.go index 8b62eb7681..1564867bce 100644 --- a/pkg/cluster/ClusterServiceExtended.go +++ b/pkg/cluster/ClusterServiceExtended.go @@ -71,11 +71,7 @@ func (impl *ClusterServiceImplExtended) FindAllWithoutConfig() ([]*ClusterBean, return beans, nil } -func (impl *ClusterServiceImplExtended) FindAll() ([]*ClusterBean, error) { - beans, err := impl.ClusterServiceImpl.FindAll() - if err != nil { - return nil, err - } +func (impl *ClusterServiceImplExtended) GetClusterFullModeDTO(beans []*ClusterBean) ([]*ClusterBean, error) { //devtron full mode logic var clusterIds []int for _, cluster := range beans { @@ -143,6 +139,22 @@ func (impl *ClusterServiceImplExtended) FindAll() ([]*ClusterBean, error) { return beans, nil } +func (impl *ClusterServiceImplExtended) FindAll() ([]*ClusterBean, error) { + beans, err := impl.ClusterServiceImpl.FindAll() + if err != nil { + return nil, err + } + return impl.GetClusterFullModeDTO(beans) +} + +func (impl *ClusterServiceImplExtended) 
FindAllExceptVirtual() ([]*ClusterBean, error) { + beans, err := impl.ClusterServiceImpl.FindAll() + if err != nil { + return nil, err + } + return impl.GetClusterFullModeDTO(beans) +} + func (impl *ClusterServiceImplExtended) Update(ctx context.Context, bean *ClusterBean, userId int32) (*ClusterBean, error) { isGitOpsConfigured, err1 := impl.gitOpsRepository.IsGitOpsConfigured() if err1 != nil { diff --git a/pkg/cluster/repository/ClusterRepository.go b/pkg/cluster/repository/ClusterRepository.go index fa57ff0ba8..5fe9941e93 100644 --- a/pkg/cluster/repository/ClusterRepository.go +++ b/pkg/cluster/repository/ClusterRepository.go @@ -50,6 +50,7 @@ type ClusterRepository interface { FindOneActive(clusterName string) (*Cluster, error) FindAll() ([]Cluster, error) FindAllActive() ([]Cluster, error) + FindAllActiveExceptVirtual() ([]Cluster, error) FindById(id int) (*Cluster, error) FindByIds(id []int) ([]Cluster, error) Update(model *Cluster) error @@ -127,6 +128,16 @@ func (impl ClusterRepositoryImpl) FindAllActive() ([]Cluster, error) { return clusters, err } +func (impl ClusterRepositoryImpl) FindAllActiveExceptVirtual() ([]Cluster, error) { + var clusters []Cluster + err := impl.dbConnection. + Model(&clusters). + Where("active=?", true). + Where("is_virtual_cluster=?", false). + Select() + return clusters, err +} + func (impl ClusterRepositoryImpl) FindById(id int) (*Cluster, error) { cluster := &Cluster{} err := impl.dbConnection. 
diff --git a/pkg/generateManifest/DeployementTemplateService.go b/pkg/generateManifest/DeployementTemplateService.go index df5f0d5f5c..681bee30d4 100644 --- a/pkg/generateManifest/DeployementTemplateService.go +++ b/pkg/generateManifest/DeployementTemplateService.go @@ -17,6 +17,7 @@ import ( "github.com/devtron-labs/devtron/pkg/pipeline/history" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/variables/parsers" + util2 "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" "go.uber.org/zap" "os" @@ -56,7 +57,9 @@ var ReleaseIdentifier = &client.ReleaseIdentifier{ } type DeploymentTemplateResponse struct { - Data string `json:"data"` + Data string `json:"data"` + ResolvedData string `json:"resolvedData"` + VariableSnapshot map[string]string `json:"variableSnapshot"` } type DeploymentTemplateService interface { @@ -177,25 +180,25 @@ func (impl DeploymentTemplateServiceImpl) FetchDeploymentsWithChartRefs(appId in func (impl DeploymentTemplateServiceImpl) GetDeploymentTemplate(ctx context.Context, request DeploymentTemplateRequest) (DeploymentTemplateResponse, error) { var result DeploymentTemplateResponse - var values string + var values, resolvedValue string var err error + var variableSnapshot map[string]string if request.Values != "" { values = request.Values - if request.RequestDataMode == Manifest { - values, err = impl.resolveTemplateVariables(request.Values, request) - if err != nil { - return result, err - } + resolvedValue, variableSnapshot, err = impl.resolveTemplateVariables(ctx, request.Values, request) + if err != nil { + return result, err } } else { switch request.Type { case repository.DefaultVersions: _, values, err = impl.chartService.GetAppOverrideForDefaultTemplate(request.ChartRefId) + resolvedValue = values case repository.PublishedOnEnvironments: - values, err = 
impl.fetchResolvedTemplateForPublishedEnvs(request) + values, resolvedValue, variableSnapshot, err = impl.fetchResolvedTemplateForPublishedEnvs(ctx, request) case repository.DeployedOnSelfEnvironment, repository.DeployedOnOtherEnvironment: - values, err = impl.fetchTemplateForDeployedEnv(request) + values, resolvedValue, variableSnapshot, err = impl.fetchTemplateForDeployedEnv(ctx, request) } if err != nil { impl.Logger.Errorw("error in getting values", "err", err) @@ -205,10 +208,12 @@ func (impl DeploymentTemplateServiceImpl) GetDeploymentTemplate(ctx context.Cont if request.RequestDataMode == Values { result.Data = values + result.ResolvedData = resolvedValue + result.VariableSnapshot = variableSnapshot return result, nil } - manifest, err := impl.GenerateManifest(ctx, request.ChartRefId, values) + manifest, err := impl.GenerateManifest(ctx, request.ChartRefId, resolvedValue) if err != nil { return result, err } @@ -216,7 +221,7 @@ func (impl DeploymentTemplateServiceImpl) GetDeploymentTemplate(ctx context.Cont return result, nil } -func (impl DeploymentTemplateServiceImpl) fetchResolvedTemplateForPublishedEnvs(request DeploymentTemplateRequest) (string, error) { +func (impl DeploymentTemplateServiceImpl) fetchResolvedTemplateForPublishedEnvs(ctx context.Context, request DeploymentTemplateRequest) (string, string, map[string]string, error) { var values string override, err := impl.propertiesConfigService.GetEnvironmentProperties(request.AppId, request.EnvId, request.ChartRefId) if err == nil && override.GlobalConfig != nil { @@ -227,42 +232,42 @@ func (impl DeploymentTemplateServiceImpl) fetchResolvedTemplateForPublishedEnvs( } } else { impl.Logger.Errorw("error in getting overridden values", "err", err) - return "", err + return "", "", nil, err } - if request.RequestDataMode == Manifest { - resolvedTemplate, err := impl.resolveTemplateVariables(values, request) - if err != nil { - return values, err - } - values = resolvedTemplate + resolvedTemplate, 
variableSnapshot, err := impl.resolveTemplateVariables(ctx, values, request) + if err != nil { + return values, values, variableSnapshot, err } - return values, nil + return values, resolvedTemplate, variableSnapshot, nil } -func (impl DeploymentTemplateServiceImpl) fetchTemplateForDeployedEnv(request DeploymentTemplateRequest) (string, error) { - history, err := impl.deploymentTemplateHistoryService.GetHistoryForDeployedTemplateById(request.DeploymentTemplateHistoryId, request.PipelineId) +func (impl DeploymentTemplateServiceImpl) fetchTemplateForDeployedEnv(ctx context.Context, request DeploymentTemplateRequest) (string, string, map[string]string, error) { + historyObject, err := impl.deploymentTemplateHistoryService.GetHistoryForDeployedTemplateById(ctx, request.DeploymentTemplateHistoryId, request.PipelineId) if err != nil { impl.Logger.Errorw("error in getting deployment template history", "err", err, "id", request.DeploymentTemplateHistoryId, "pipelineId", request.PipelineId) - return "", err + return "", "", nil, err } - if request.RequestDataMode == Values { - return history.CodeEditorValue.Value, nil - } - return history.ResolvedTemplate, nil + //todo Subhashish solve variable leak + return historyObject.CodeEditorValue.Value, historyObject.ResolvedTemplateData, historyObject.VariableSnapshot, nil } -func (impl DeploymentTemplateServiceImpl) resolveTemplateVariables(values string, request DeploymentTemplateRequest) (string, error) { +func (impl DeploymentTemplateServiceImpl) resolveTemplateVariables(ctx context.Context, values string, request DeploymentTemplateRequest) (string, map[string]string, error) { + isSuperAdmin, err := util2.GetIsSuperAdminFromContext(ctx) + if err != nil { + return values, nil, err + } scope, err := impl.extractScopeData(request) if err != nil { - return "", err + return values, nil, err } - resolvedTemplate, err := impl.chartService.ExtractVariablesAndResolveTemplate(scope, values, parsers.StringVariableTemplate) + 
maskUnknownVariableForHelmGenerate := request.RequestDataMode == Manifest + resolvedTemplate, variableSnapshot, err := impl.chartService.ExtractVariablesAndResolveTemplate(scope, values, parsers.StringVariableTemplate, isSuperAdmin, maskUnknownVariableForHelmGenerate) if err != nil { - return "", err + return values, variableSnapshot, err } - return resolvedTemplate, nil + return resolvedTemplate, variableSnapshot, nil } func (impl DeploymentTemplateServiceImpl) extractScopeData(request DeploymentTemplateRequest) (resourceQualifiers.Scope, error) { diff --git a/pkg/k8s/K8sCommonService.go b/pkg/k8s/K8sCommonService.go index c38519b952..f8c7e3f372 100644 --- a/pkg/k8s/K8sCommonService.go +++ b/pkg/k8s/K8sCommonService.go @@ -345,9 +345,10 @@ func (impl *K8sCommonServiceImpl) GetCoreClientByClusterId(clusterId int) (*kube } func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceResponse, resourceTree map[string]interface{}) map[string]interface{} { - portsService := make([]int64, 0) - portsEndpoint := make([]int64, 0) - portEndpointSlice := make([]int64, 0) + servicePortMapping := make(map[string]interface{}) + endpointPortMapping := make(map[string]interface{}) + endpointSlicePortMapping := make(map[string]interface{}) + for _, portHolder := range resp { if portHolder.ManifestResponse == nil { continue @@ -357,6 +358,26 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("kind not found in resource tree, unable to extract port no") continue } + metadataResp, ok := portHolder.ManifestResponse.Manifest.Object[k8sCommonBean.K8sClusterResourceMetadataKey] + if !ok { + impl.logger.Warnw("metadata not found in resource tree, unable to extract port no") + continue + } + metadata, ok := metadataResp.(map[string]interface{}) + if !ok { + impl.logger.Warnw("metadata not found in resource tree, unable to extract port no") + continue + } + serviceNameResp, ok := 
metadata[k8sCommonBean.K8sClusterResourceMetadataNameKey] + if !ok { + impl.logger.Warnw("service name not found in resource tree, unable to extract port no") + continue + } + serviceName, ok := serviceNameResp.(string) + if !ok { + impl.logger.Warnw("service name not found in resource tree, unable to extract port no") + continue + } if kind == k8sCommonBean.ServiceKind { specField, ok := portHolder.ManifestResponse.Manifest.Object[k8sCommonBean.Spec] if !ok { @@ -368,6 +389,7 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("spec not found in resource tree, unable to extract port no") continue } + if spec != nil { ports, ok := spec[k8sCommonBean.Ports] if !ok { @@ -379,6 +401,7 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("portList not found in resource tree, unable to extract port no") continue } + servicePorts := make([]int64, 0) for _, portItem := range portList { portItems, ok := portItem.(map[string]interface{}) if !ok { @@ -397,10 +420,11 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon continue } if portNumber != 0 { - portsService = append(portsService, portNumber) + servicePorts = append(servicePorts, portNumber) } } } + servicePortMapping[serviceName] = servicePorts } else { impl.logger.Warnw("spec doest not contain data", "spec", spec) continue @@ -435,6 +459,7 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("portsIfs not found in resource tree, unable to extract port no") continue } + endpointPorts := make([]int64, 0) for _, portsIf := range portsIfs { portsIfObj, ok := portsIf.(map[string]interface{}) if !ok { @@ -447,9 +472,10 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("port not found in resource tree, unable to extract port no") continue } - portsEndpoint = append(portsEndpoint, port) + 
endpointPorts = append(endpointPorts, port) } } + endpointPortMapping[serviceName] = endpointPorts } } } @@ -466,6 +492,7 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("endPointsSlicePorts not found in resource tree endpoint, unable to extract port no") continue } + endpointSlicePorts := make([]int64, 0) for _, val := range endPointsSlicePorts { portNumbers, ok := val.(map[string]interface{})[k8sCommonBean.Port] if !ok { @@ -478,9 +505,10 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon continue } if portNumber != 0 { - portEndpointSlice = append(portEndpointSlice, portNumber) + endpointSlicePorts = append(endpointSlicePorts, portNumber) } } + endpointSlicePortMapping[serviceName] = endpointSlicePorts } } } @@ -496,15 +524,25 @@ func (impl K8sCommonServiceImpl) PortNumberExtraction(resp []BatchResourceRespon impl.logger.Warnw("value not found in resourceTreeVal, unable to extract port no") continue } + serviceNameRes, ok := value[k8sCommonBean.K8sClusterResourceMetadataNameKey] + if !ok { + impl.logger.Warnw("service name not found in resourceTreeVal, unable to extract port no") + continue + } + serviceName, ok := serviceNameRes.(string) + if !ok { + impl.logger.Warnw("service name not found in resourceTreeVal, unable to extract port no") + continue + } for key, _type := range value { if key == k8sCommonBean.Kind && _type == k8sCommonBean.EndpointsKind { - value[k8sCommonBean.Port] = portsEndpoint + value[k8sCommonBean.Port] = endpointPortMapping[serviceName] } if key == k8sCommonBean.Kind && _type == k8sCommonBean.ServiceKind { - value[k8sCommonBean.Port] = portsService + value[k8sCommonBean.Port] = servicePortMapping[serviceName] } if key == k8sCommonBean.Kind && _type == k8sCommonBean.EndPointsSlice { - value[k8sCommonBean.Port] = portEndpointSlice + value[k8sCommonBean.Port] = endpointSlicePortMapping[serviceName] } } } diff --git a/pkg/k8s/capacity/bean/bean.go 
b/pkg/k8s/capacity/bean/bean.go index 138aa69953..95e35c8644 100644 --- a/pkg/k8s/capacity/bean/bean.go +++ b/pkg/k8s/capacity/bean/bean.go @@ -66,7 +66,6 @@ type ClusterCapacityDetail struct { ServerVersion string `json:"serverVersion,omitempty"` Cpu *ResourceDetailObject `json:"cpu"` Memory *ResourceDetailObject `json:"memory"` - IsVirtualCluster bool `json:"isVirtualCluster"` } type NodeCapacityDetail struct { diff --git a/pkg/k8s/capacity/k8sCapacityService.go b/pkg/k8s/capacity/k8sCapacityService.go index 1bef44b9dc..97da7376b0 100644 --- a/pkg/k8s/capacity/k8sCapacityService.go +++ b/pkg/k8s/capacity/k8sCapacityService.go @@ -67,9 +67,7 @@ func (impl *K8sCapacityServiceImpl) GetClusterCapacityDetailList(ctx context.Con for _, cluster := range clusters { clusterCapacityDetail := &bean.ClusterCapacityDetail{} var err error - if cluster.IsVirtualCluster { - clusterCapacityDetail.IsVirtualCluster = cluster.IsVirtualCluster - } else if len(cluster.ErrorInConnecting) > 0 { + if len(cluster.ErrorInConnecting) > 0 { clusterCapacityDetail.ErrorInConnection = cluster.ErrorInConnecting } else { clusterCapacityDetail, err = impl.GetClusterCapacityDetail(ctx, cluster, true) diff --git a/pkg/pipeline/BuildPipelineConfigService.go b/pkg/pipeline/BuildPipelineConfigService.go index b2c968a4d6..4219f677ef 100644 --- a/pkg/pipeline/BuildPipelineConfigService.go +++ b/pkg/pipeline/BuildPipelineConfigService.go @@ -26,7 +26,6 @@ import ( dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/attributes" "github.com/devtron-labs/devtron/pkg/bean" bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" @@ -126,7 +125,7 @@ 
type CiPipelineConfigServiceImpl struct { ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository resourceGroupService resourceGroup2.ResourceGroupService enforcerUtil rbac.EnforcerUtil - customTagService pkg.CustomTagService + customTagService CustomTagService } func NewCiPipelineConfigServiceImpl(logger *zap.SugaredLogger, @@ -148,7 +147,7 @@ func NewCiPipelineConfigServiceImpl(logger *zap.SugaredLogger, enforcerUtil rbac.EnforcerUtil, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, resourceGroupService resourceGroup2.ResourceGroupService, - customTagService pkg.CustomTagService) *CiPipelineConfigServiceImpl { + customTagService CustomTagService) *CiPipelineConfigServiceImpl { securityConfig := &SecurityConfig{} err := env.Parse(securityConfig) @@ -621,7 +620,7 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipeline(appId int) (ciConfig *bea impl.logger.Errorw("error in fetching ciEnvMapping", "ciPipelineId ", pipeline.Id, "err", err) return nil, err } - customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) if err != nil && err != pg.ErrNoRows { return nil, err } @@ -758,6 +757,16 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipelineById(pipelineId int) (ciPi IsDockerConfigOverridden: pipeline.IsDockerConfigOverridden, PipelineType: bean.PipelineType(pipeline.PipelineType), } + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + if err != nil && err != pg.ErrNoRows { + return nil, err + } + if customTag.Id != 0 { + ciPipeline.CustomTagObject = &bean.CustomTagData{ + TagPattern: customTag.TagPattern, + CounterX: customTag.AutoIncreasingNumber, + } + } ciEnvMapping, err := 
impl.ciPipelineRepository.FindCiEnvMappingByCiPipelineId(pipelineId) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching ci env mapping", "pipelineId", pipelineId, "err", err) diff --git a/pkg/pipeline/CdHandler.go b/pkg/pipeline/CdHandler.go index b2dbb3913b..ad22659614 100644 --- a/pkg/pipeline/CdHandler.go +++ b/pkg/pipeline/CdHandler.go @@ -52,6 +52,7 @@ import ( "github.com/go-pg/pg" "go.opentelemetry.io/otel" "go.uber.org/zap" + "k8s.io/client-go/rest" "os" "path/filepath" "strconv" @@ -600,10 +601,13 @@ func (impl *CdHandlerImpl) CancelStage(workflowRunnerId int, userId int32) (int, } else if workflowRunner.WorkflowType == POST { isExtCluster = pipeline.RunPostStageInEnv } - restConfig, err := impl.k8sUtil.GetRestConfigByCluster(clusterConfig) - if err != nil { - impl.Logger.Errorw("error in getting rest config by cluster id", "err", err) - return 0, err + var restConfig *rest.Config + if isExtCluster { + restConfig, err = impl.k8sUtil.GetRestConfigByCluster(clusterConfig) + if err != nil { + impl.Logger.Errorw("error in getting rest config by cluster id", "err", err) + return 0, err + } } // Terminate workflow err = impl.workflowService.TerminateWorkflow(workflowRunner.ExecutorType, workflowRunner.Name, workflowRunner.Namespace, restConfig, isExtCluster, nil) diff --git a/pkg/pipeline/CiCdConfig.go b/pkg/pipeline/CiCdConfig.go index af9d54d35c..6cf984bef9 100644 --- a/pkg/pipeline/CiCdConfig.go +++ b/pkg/pipeline/CiCdConfig.go @@ -1,12 +1,14 @@ package pipeline import ( + "encoding/json" "flag" "fmt" "github.com/caarlos0/env" blob_storage "github.com/devtron-labs/common-lib/blob-storage" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/pkg/pipeline/bean" + v12 "k8s.io/api/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "os/user" @@ -420,3 +422,45 @@ func (impl *CiCdConfig) 
WorkflowRetriesEnabled() bool { return false } } + +func (impl *CiCdConfig) GetWorkflowVolumeAndVolumeMounts() ([]v12.Volume, []v12.VolumeMount, error) { + var volumes []v12.Volume + var volumeMounts []v12.VolumeMount + volumeMountsForCiJson := impl.VolumeMountsForCiJson + if len(volumeMountsForCiJson) > 0 { + var volumeMountsForCi []CiVolumeMount + // Unmarshal or Decode the JSON to the interface. + err := json.Unmarshal([]byte(volumeMountsForCiJson), &volumeMountsForCi) + if err != nil { + return nil, nil, err + } + + for _, volumeMountForCi := range volumeMountsForCi { + volumes = append(volumes, getWorkflowVolume(volumeMountForCi)) + volumeMounts = append(volumeMounts, getWorkflowVolumeMounts(volumeMountForCi)) + } + } + return volumes, volumeMounts, nil +} + +func getWorkflowVolume(volumeMountForCi CiVolumeMount) v12.Volume { + hostPathDirectoryOrCreate := v12.HostPathDirectoryOrCreate + + return v12.Volume{ + Name: volumeMountForCi.Name, + VolumeSource: v12.VolumeSource{ + HostPath: &v12.HostPathVolumeSource{ + Path: volumeMountForCi.HostMountPath, + Type: &hostPathDirectoryOrCreate, + }, + }, + } + +} + +func getWorkflowVolumeMounts(volumeMountForCi CiVolumeMount) v12.VolumeMount { + return v12.VolumeMount{ + Name: volumeMountForCi.Name, + MountPath: volumeMountForCi.ContainerMountPath, + } +} diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index e0936e50c0..f7500a83d3 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -32,7 +32,6 @@ import ( app2 "github.com/devtron-labs/devtron/internal/sql/repository/app" dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/helper" - "github.com/devtron-labs/devtron/pkg" repository2 "github.com/devtron-labs/devtron/pkg/cluster/repository" 
"github.com/devtron-labs/devtron/pkg/genericNotes" repository3 "github.com/devtron-labs/devtron/pkg/genericNotes/repository" @@ -115,7 +114,7 @@ type CiCdPipelineOrchestratorImpl struct { dockerArtifactStoreRepository dockerRegistryRepository.DockerArtifactStoreRepository configMapService ConfigMapService genericNoteService genericNotes.GenericNoteService - customTagService pkg.CustomTagService + customTagService CustomTagService } func NewCiCdPipelineOrchestrator( @@ -141,7 +140,7 @@ func NewCiCdPipelineOrchestrator( ciTemplateService CiTemplateService, dockerArtifactStoreRepository dockerRegistryRepository.DockerArtifactStoreRepository, configMapService ConfigMapService, - customTagService pkg.CustomTagService, + customTagService CustomTagService, genericNoteService genericNotes.GenericNoteService) *CiCdPipelineOrchestratorImpl { return &CiCdPipelineOrchestratorImpl{ appRepository: pipelineGroupRepository, @@ -335,7 +334,7 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. //Otherwise deleteIfExists if createRequest.CustomTagObject != nil { customTag := bean4.CustomTag{ - EntityKey: pkg.EntityTypeCiPipelineId, + EntityKey: bean2.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipelineObject.Id), TagPattern: createRequest.CustomTagObject.TagPattern, AutoIncreasingNumber: createRequest.CustomTagObject.CounterX, @@ -346,7 +345,7 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. 
} } else { customTag := bean4.CustomTag{ - EntityKey: pkg.EntityTypeCiPipelineId, + EntityKey: bean2.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipelineObject.Id), } err := impl.customTagService.DeleteCustomTagIfExists(customTag) @@ -771,7 +770,7 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCiConf(createRequest *bean.CiConf //If customTagObejct has been passed, save it if ciPipeline.CustomTagObject != nil { customTag := &bean4.CustomTag{ - EntityKey: pkg.EntityTypeCiPipelineId, + EntityKey: bean2.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipeline.Id), TagPattern: ciPipeline.CustomTagObject.TagPattern, AutoIncreasingNumber: ciPipeline.CustomTagObject.CounterX, @@ -1065,18 +1064,23 @@ func (impl CiCdPipelineOrchestratorImpl) CreateApp(createRequest *bean.CreateApp } // create labels and tags with app if app.Active && len(createRequest.AppLabels) > 0 { + appLabelMap := make(map[string]bool) for _, label := range createRequest.AppLabels { - request := &bean.AppLabelDto{ - AppId: app.Id, - Key: label.Key, - Value: label.Value, - Propagate: label.Propagate, - UserId: createRequest.UserId, - } - _, err := impl.appLabelsService.Create(request, tx) - if err != nil { - impl.logger.Errorw("error on creating labels for app id ", "err", err, "appId", app.Id) - return nil, err + uniqueLabelExists := fmt.Sprintf("%s:%s:%t", label.Key, label.Value, label.Propagate) + if _, ok := appLabelMap[uniqueLabelExists]; !ok { + appLabelMap[uniqueLabelExists] = true + request := &bean.AppLabelDto{ + AppId: app.Id, + Key: label.Key, + Value: label.Value, + Propagate: label.Propagate, + UserId: createRequest.UserId, + } + _, err := impl.appLabelsService.Create(request, tx) + if err != nil { + impl.logger.Errorw("error on creating labels for app id ", "err", err, "appId", app.Id) + return nil, err + } } } } diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index e6a697f34c..385036df79 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go 
@@ -30,7 +30,6 @@ import ( "github.com/devtron-labs/devtron/client/gitSensor" "github.com/devtron-labs/devtron/internal/sql/repository/appWorkflow" repository2 "github.com/devtron-labs/devtron/internal/sql/repository/imageTagging" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/cluster" repository3 "github.com/devtron-labs/devtron/pkg/cluster/repository" k8s2 "github.com/devtron-labs/devtron/pkg/k8s" @@ -107,13 +106,13 @@ type CiHandlerImpl struct { resourceGroupService resourceGroup.ResourceGroupService envRepository repository3.EnvironmentRepository imageTaggingService ImageTaggingService - customTagService pkg.CustomTagService + customTagService CustomTagService appWorkflowRepository appWorkflow.AppWorkflowRepository config *CiConfig k8sCommonService k8s2.K8sCommonService } -func NewCiHandlerImpl(Logger *zap.SugaredLogger, ciService CiService, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, gitSensorClient gitSensor.Client, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, workflowService WorkflowService, ciLogService CiLogService, ciArtifactRepository repository.CiArtifactRepository, userService user.UserService, eventClient client.EventClient, eventFactory client.EventFactory, ciPipelineRepository pipelineConfig.CiPipelineRepository, appListingRepository repository.AppListingRepository, K8sUtil *k8s.K8sUtil, cdPipelineRepository pipelineConfig.PipelineRepository, enforcerUtil rbac.EnforcerUtil, resourceGroupService resourceGroup.ResourceGroupService, envRepository repository3.EnvironmentRepository, imageTaggingService ImageTaggingService, appWorkflowRepository appWorkflow.AppWorkflowRepository, customTagService pkg.CustomTagService, k8sCommonService k8s2.K8sCommonService) *CiHandlerImpl { +func NewCiHandlerImpl(Logger *zap.SugaredLogger, ciService CiService, 
ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, gitSensorClient gitSensor.Client, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, workflowService WorkflowService, ciLogService CiLogService, ciArtifactRepository repository.CiArtifactRepository, userService user.UserService, eventClient client.EventClient, eventFactory client.EventFactory, ciPipelineRepository pipelineConfig.CiPipelineRepository, appListingRepository repository.AppListingRepository, K8sUtil *k8s.K8sUtil, cdPipelineRepository pipelineConfig.PipelineRepository, enforcerUtil rbac.EnforcerUtil, resourceGroupService resourceGroup.ResourceGroupService, envRepository repository3.EnvironmentRepository, imageTaggingService ImageTaggingService, appWorkflowRepository appWorkflow.AppWorkflowRepository, customTagService CustomTagService, k8sCommonService k8s2.K8sCommonService) *CiHandlerImpl { cih := &CiHandlerImpl{ Logger: Logger, ciService: ciService, @@ -621,8 +620,8 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int EnvironmentName: w.EnvironmentName, ReferenceWorkflowId: w.RefCiWorkflowId, } - if w.Message == pkg.ImageTagUnavailableMessage { - customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(w.CiPipelineId)) + if w.Message == bean3.ImageTagUnavailableMessage { + customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(bean3.EntityTypeCiPipelineId, strconv.Itoa(w.CiPipelineId)) if err != nil && err != pg.ErrNoRows { //err == pg.ErrNoRows should never happen return nil, err @@ -635,7 +634,7 @@ func (impl *CiHandlerImpl) GetBuildHistory(pipelineId int, appId int, offset int wfResponse.CustomTag = &bean2.CustomTagErrorResponse{ TagPattern: customTag.TagPattern, AutoIncreasingNumber: customTag.AutoIncreasingNumber, - Message: pkg.ImageTagUnavailableMessage, + Message: bean3.ImageTagUnavailableMessage, } } if imageTagsDataMap[w.CiArtifactId] != nil { diff --git 
a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 15b39010a0..a8e58881ca 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -24,7 +24,6 @@ import ( appRepository "github.com/devtron-labs/devtron/internal/sql/repository/app" repository3 "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/helper" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" repository1 "github.com/devtron-labs/devtron/pkg/cluster/repository" bean2 "github.com/devtron-labs/devtron/pkg/pipeline/bean" @@ -73,7 +72,7 @@ type CiServiceImpl struct { appCrudOperationService app.AppCrudOperationService envRepository repository1.EnvironmentRepository appRepository appRepository.AppRepository - customTagService pkg.CustomTagService + customTagService CustomTagService variableSnapshotHistoryService variables.VariableSnapshotHistoryService config *CiConfig } @@ -87,7 +86,7 @@ func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService userService user.UserService, ciTemplateService CiTemplateService, appCrudOperationService app.AppCrudOperationService, envRepository repository1.EnvironmentRepository, appRepository appRepository.AppRepository, variableSnapshotHistoryService variables.VariableSnapshotHistoryService, - customTagService pkg.CustomTagService, + customTagService CustomTagService, ) *CiServiceImpl { cis := &CiServiceImpl{ Logger: Logger, @@ -456,16 +455,16 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
} var dockerImageTag string - customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean2.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) if err != nil && err != pg.ErrNoRows { return nil, err } if customTag.Id != 0 { - imagePathReservation, err := impl.customTagService.GenerateImagePath(pkg.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id), pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository) + imagePathReservation, err := impl.customTagService.GenerateImagePath(bean2.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id), pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository) if err != nil { - if errors.Is(err, pkg.ErrImagePathInUse) { + if errors.Is(err, bean2.ErrImagePathInUse) { savedWf.Status = pipelineConfig.WorkflowFailed - savedWf.Message = pkg.ImageTagUnavailableMessage + savedWf.Message = bean2.ImageTagUnavailableMessage err1 := impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) if err1 != nil { impl.Logger.Errorw("could not save workflow, after failing due to conflicting image tag") diff --git a/pkg/CustomTagService.go b/pkg/pipeline/CustomTagService.go similarity index 71% rename from pkg/CustomTagService.go rename to pkg/pipeline/CustomTagService.go index b8e067b0d2..76cfc766ac 100644 --- a/pkg/CustomTagService.go +++ b/pkg/pipeline/CustomTagService.go @@ -1,9 +1,10 @@ -package pkg +package pipeline import ( "fmt" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository" + bean2 "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/go-pg/pg" "go.uber.org/zap" "regexp" @@ -11,24 +12,6 @@ import ( "strings" ) -const ( - EntityNull = iota - EntityTypeCiPipelineId - EntityTypePreCD - EntityTypePostCD -) - -const ( - 
imagePathPattern = "%s/%s:%s" // dockerReg/dockerRepo:Tag - ImageTagUnavailableMessage = "Desired image tag already exists" - REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS = `\{.{2,}\}` - REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x = `\{[^xX]|{}\}` -) - -var ( - ErrImagePathInUse = fmt.Errorf(ImageTagUnavailableMessage) -) - type CustomTagService interface { CreateOrUpdateCustomTag(tag *bean.CustomTag) error GetCustomTagByEntityKeyAndValue(entityKey int, entityValue string) (*repository.CustomTag, error) @@ -61,7 +44,7 @@ func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag *bean.CustomTag) e customTagData := repository.CustomTag{ EntityKey: tag.EntityKey, EntityValue: tag.EntityValue, - TagPattern: strings.ReplaceAll(tag.TagPattern, "{X}", "{x}"), + TagPattern: strings.ReplaceAll(tag.TagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_X, bean2.IMAGE_TAG_VARIABLE_NAME_x), AutoIncreasingNumber: tag.AutoIncreasingNumber, Metadata: tag.Metadata, Active: true, @@ -106,13 +89,13 @@ func (impl *CustomTagServiceImpl) GenerateImagePath(entityKey int, entityValue s if err != nil { return nil, err } - imagePath := fmt.Sprintf(imagePathPattern, dockerRegistryURL, dockerRepo, tag) + imagePath := fmt.Sprintf(bean2.ImagePathPattern, dockerRegistryURL, dockerRepo, tag) imagePathReservations, err := impl.customTagRepository.FindByImagePath(tx, imagePath) if err != nil && err != pg.ErrNoRows { return nil, err } if len(imagePathReservations) > 0 { - return nil, ErrImagePathInUse + return nil, bean2.ErrImagePathInUse } imagePathReservation := &repository.ImagePathReservation{ ImagePath: imagePath, @@ -137,10 +120,9 @@ func validateAndConstructTag(customTagData *repository.CustomTag) (string, error if customTagData.AutoIncreasingNumber < 0 { return "", fmt.Errorf("counter {x} can not be negative") } - dockerImageTag := strings.ReplaceAll(customTagData.TagPattern, "{x}", strconv.Itoa(customTagData.AutoIncreasingNumber-1)) //-1 because number is already incremented, 
current value will be used next time - err = validateTag(dockerImageTag) - if err != nil { - return "", err + dockerImageTag := strings.ReplaceAll(customTagData.TagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_x, strconv.Itoa(customTagData.AutoIncreasingNumber-1)) //-1 because number is already incremented, current value will be used next time + if !isValidDockerImageTag(dockerImageTag) { + return dockerImageTag, fmt.Errorf("invalid docker tag") } return dockerImageTag, nil } @@ -150,38 +132,32 @@ func validateTagPattern(customTagPattern string) error { return fmt.Errorf("tag length can not be zero") } - if IsInvalidVariableFormat(customTagPattern) { - return fmt.Errorf("only one variable is allowed. Allowed variable format : {x} or {X}") - } + variableCount := 0 + variableCount = variableCount + strings.Count(customTagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_x) + variableCount = variableCount + strings.Count(customTagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_X) - remainingString := strings.ReplaceAll(customTagPattern, ".{x}", "") - remainingString = strings.ReplaceAll(remainingString, ".{X}", "") - if len(remainingString) == 0 { - return nil + if variableCount == 0 { + // there can be case when there is only one {x} or {x} + return fmt.Errorf("variable with format {x} or {X} not found") + } else if variableCount > 1 { + return fmt.Errorf("only one variable with format {x} or {X} allowed") } - n := len(remainingString) - if remainingString[0] == '.' || remainingString[0] == '-' { - return fmt.Errorf("tag can not start with an hyphen or a period") - } - if n != 0 && (remainingString[n-1] == '.' 
|| remainingString[n-1] == '-') { - return fmt.Errorf("tag can not end with an hyphen or a period") + // replacing variable with 1 (dummy value) and checking if resulting string is valid tag + tagWithDummyValue := strings.ReplaceAll(customTagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_x, "1") + tagWithDummyValue = strings.ReplaceAll(tagWithDummyValue, bean2.IMAGE_TAG_VARIABLE_NAME_X, "1") + + if !isValidDockerImageTag(tagWithDummyValue) { + return fmt.Errorf("not a valid image tag") } + return nil } -func IsInvalidVariableFormat(customTagPattern string) bool { - regex := regexp.MustCompile(REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS) - matches := regex.FindAllString(customTagPattern, -1) - if len(matches) > 0 { - return true - } - regex = regexp.MustCompile(REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x) - matches = regex.FindAllString(customTagPattern, -1) - if len(matches) > 0 { - return true - } - return false +func isValidDockerImageTag(tag string) bool { + // Define the regular expression for a valid Docker image tag + re := regexp.MustCompile(bean2.REGEX_PATTERN_FOR_IMAGE_TAG) + return re.MatchString(tag) } func validateTag(imageTag string) error { diff --git a/pkg/pipeline/DeploymentConfigService.go b/pkg/pipeline/DeploymentConfigService.go index 7f5ec89832..91c9e1d7e2 100644 --- a/pkg/pipeline/DeploymentConfigService.go +++ b/pkg/pipeline/DeploymentConfigService.go @@ -1,6 +1,7 @@ package pipeline import ( + "context" "encoding/json" "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" @@ -11,14 +12,16 @@ import ( "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/variables" "github.com/devtron-labs/devtron/pkg/variables/models" + "github.com/devtron-labs/devtron/pkg/variables/parsers" repository6 
"github.com/devtron-labs/devtron/pkg/variables/repository" + "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" errors2 "github.com/juju/errors" "go.uber.org/zap" ) type DeploymentConfigService interface { - GetLatestDeploymentConfigurationByPipelineId(pipelineId int, userHasAdminAccess bool) (*history.AllDeploymentConfigurationDetail, error) + GetLatestDeploymentConfigurationByPipelineId(ctx context.Context, pipelineId int, userHasAdminAccess bool) (*history.AllDeploymentConfigurationDetail, error) } type DeploymentConfigServiceImpl struct { @@ -34,6 +37,7 @@ type DeploymentConfigServiceImpl struct { chartRefRepository chartRepoRepository.ChartRefRepository variableEntityMappingService variables.VariableEntityMappingService scopedVariableService variables.ScopedVariableService + variableTemplateParser parsers.VariableTemplateParser } func NewDeploymentConfigServiceImpl(logger *zap.SugaredLogger, @@ -47,7 +51,9 @@ func NewDeploymentConfigServiceImpl(logger *zap.SugaredLogger, configMapHistoryService history.ConfigMapHistoryService, chartRefRepository chartRepoRepository.ChartRefRepository, variableEntityMappingService variables.VariableEntityMappingService, - scopedVariableService variables.ScopedVariableService) *DeploymentConfigServiceImpl { + scopedVariableService variables.ScopedVariableService, + variableTemplateParser parsers.VariableTemplateParser, +) *DeploymentConfigServiceImpl { return &DeploymentConfigServiceImpl{ logger: logger, envConfigOverrideRepository: envConfigOverrideRepository, @@ -61,10 +67,11 @@ func NewDeploymentConfigServiceImpl(logger *zap.SugaredLogger, chartRefRepository: chartRefRepository, variableEntityMappingService: variableEntityMappingService, scopedVariableService: scopedVariableService, + variableTemplateParser: variableTemplateParser, } } -func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentConfigurationByPipelineId(pipelineId int, 
userHasAdminAccess bool) (*history.AllDeploymentConfigurationDetail, error) { +func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentConfigurationByPipelineId(ctx context.Context, pipelineId int, userHasAdminAccess bool) (*history.AllDeploymentConfigurationDetail, error) { configResp := &history.AllDeploymentConfigurationDetail{} pipeline, err := impl.pipelineRepository.FindById(pipelineId) if err != nil { @@ -72,7 +79,7 @@ func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentConfigurationByPipel return nil, err } - deploymentTemplateConfig, err := impl.GetLatestDeploymentTemplateConfig(pipeline) + deploymentTemplateConfig, err := impl.GetLatestDeploymentTemplateConfig(ctx, pipeline) if err != nil { impl.logger.Errorw("error in getting latest deploymentTemplate", "err", err) return nil, err @@ -96,28 +103,41 @@ func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentConfigurationByPipel return configResp, nil } -func (impl *DeploymentConfigServiceImpl) extractVariablesAndGetScopedVariables(scope resourceQualifiers.Scope, entity repository6.Entity) (map[string]string, error) { +func (impl *DeploymentConfigServiceImpl) extractVariablesAndGetScopedVariables(template string, scope resourceQualifiers.Scope, entity repository6.Entity, isSuperAdmin bool) (string, map[string]string, error) { variableMap := make(map[string]string) entityToVariables, err := impl.variableEntityMappingService.GetAllMappingsForEntities([]repository6.Entity{entity}) if err != nil { - return variableMap, err + return template, variableMap, err } scopedVariables := make([]*models.ScopedVariableData, 0) if _, ok := entityToVariables[entity]; ok && len(entityToVariables[entity]) > 0 { - scopedVariables, err = impl.scopedVariableService.GetScopedVariables(scope, entityToVariables[entity], true) + scopedVariables, err = impl.scopedVariableService.GetScopedVariables(scope, entityToVariables[entity], isSuperAdmin) if err != nil { - return variableMap, err + return template, variableMap, 
err } } for _, variable := range scopedVariables { variableMap[variable.VariableName] = variable.VariableValue.StringValue() } - return variableMap, nil + + if len(variableMap) == 0 { + return template, variableMap, nil + } + + parserRequest := parsers.VariableParserRequest{Template: template, Variables: scopedVariables, TemplateType: parsers.JsonVariableTemplate} + parserResponse := impl.variableTemplateParser.ParseTemplate(parserRequest) + err = parserResponse.Error + if err != nil { + return template, variableMap, err + } + resolvedTemplate := parserResponse.ResolvedTemplate + + return resolvedTemplate, variableMap, nil } -func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentTemplateConfig(pipeline *pipelineConfig.Pipeline) (*history.HistoryDetailDto, error) { +func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentTemplateConfig(ctx context.Context, pipeline *pipelineConfig.Pipeline) (*history.HistoryDetailDto, error) { isAppMetricsEnabled := false envLevelAppMetrics, err := impl.envLevelAppMetricsRepository.FindByAppIdAndEnvId(pipeline.AppId, pipeline.EnvironmentId) if err != nil && err != pg.ErrNoRows { @@ -155,10 +175,14 @@ func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentTemplateConfig(pipel EntityType: repository6.EntityTypeDeploymentTemplateEnvLevel, EntityId: envOverride.Id, } - scopedVariablesMap, err := impl.extractVariablesAndGetScopedVariables(scope, entity) + isSuperAdmin, err := util.GetIsSuperAdminFromContext(ctx) if err != nil { return nil, err } + resolvedTemplate, scopedVariablesMap, err := impl.extractVariablesAndGetScopedVariables(envOverride.EnvOverrideValues, scope, entity, isSuperAdmin) + if err != nil { + impl.logger.Errorw("could not resolve template", "err", err, "envOverrideId", envOverride.Id, "scope", scope, "pipelineId", pipeline.Id) + } deploymentTemplateConfig = &history.HistoryDetailDto{ TemplateName: envOverride.Chart.ChartName, @@ -168,7 +192,8 @@ func (impl *DeploymentConfigServiceImpl) 
GetLatestDeploymentTemplateConfig(pipel DisplayName: "values.yaml", Value: envOverride.EnvOverrideValues, }, - VariableSnapshot: scopedVariablesMap, + VariableSnapshot: scopedVariablesMap, + ResolvedTemplateData: resolvedTemplate, } } } else { @@ -192,10 +217,14 @@ func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentTemplateConfig(pipel EntityType: repository6.EntityTypeDeploymentTemplateAppLevel, EntityId: chart.Id, } - scopedVariablesMap, err := impl.extractVariablesAndGetScopedVariables(scope, entity) + isSuperAdmin, err := util.GetIsSuperAdminFromContext(ctx) if err != nil { return nil, err } + resolvedTemplate, scopedVariablesMap, err := impl.extractVariablesAndGetScopedVariables(chart.GlobalOverride, scope, entity, isSuperAdmin) + if err != nil { + impl.logger.Errorw("could not resolve template", "err", err, "chartId", chart.Id, "scope", scope, "pipelineId", pipeline.Id) + } deploymentTemplateConfig = &history.HistoryDetailDto{ TemplateName: chart.ChartName, TemplateVersion: chartRef.Version, @@ -204,7 +233,8 @@ func (impl *DeploymentConfigServiceImpl) GetLatestDeploymentTemplateConfig(pipel DisplayName: "values.yaml", Value: chart.GlobalOverride, }, - VariableSnapshot: scopedVariablesMap, + VariableSnapshot: scopedVariablesMap, + ResolvedTemplateData: resolvedTemplate, } } return deploymentTemplateConfig, nil diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index 80e37041fe..d8953ff3c9 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -109,6 +109,7 @@ type CdPipelineConfigService interface { GetEnvironmentListForAutocompleteFilter(envName string, clusterIds []int, offset int, size int, emailId string, checkAuthBatch func(emailId string, appObject []string, envObject []string) (map[string]bool, map[string]bool), ctx context.Context) (*cluster.ResourceGroupingResponse, error) IsGitopsConfigured() (bool, error) 
RegisterInACD(gitOpsRepoName string, chartGitAttr *util.ChartGitAttribute, userId int32, ctx context.Context) error + CreateExternalCiAndAppWorkflowMapping(appId, appWorkflowId int, userId int32, tx *pg.Tx) (int, error) } type CdPipelineConfigServiceImpl struct { @@ -1541,15 +1542,7 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a } // Rollback tx on error. defer tx.Rollback() - if pipeline.AppWorkflowId == 0 && pipeline.ParentPipelineType == "WEBHOOK" { - externalCiPipeline := &pipelineConfig.ExternalCiPipeline{ - AppId: app.Id, - AccessToken: "", - Active: true, - AuditLog: sql.AuditLog{CreatedBy: userId, CreatedOn: time.Now(), UpdatedOn: time.Now(), UpdatedBy: userId}, - } - externalCiPipeline, err = impl.ciPipelineRepository.SaveExternalCi(externalCiPipeline, tx) wf := &appWorkflow.AppWorkflow{ Name: fmt.Sprintf("wf-%d-%s", app.Id, util2.Generate(4)), AppId: app.Id, @@ -1558,21 +1551,15 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a } savedAppWf, err := impl.appWorkflowRepository.SaveAppWorkflowWithTx(wf, tx) if err != nil { - impl.logger.Errorw("err", err) + impl.logger.Errorw("error in saving app workflow", "appId", app.Id, "err", err) return 0, err } - appWorkflowMap := &appWorkflow.AppWorkflowMapping{ - AppWorkflowId: savedAppWf.Id, - ComponentId: externalCiPipeline.Id, - Type: "WEBHOOK", - Active: true, - AuditLog: sql.AuditLog{CreatedBy: userId, CreatedOn: time.Now(), UpdatedOn: time.Now(), UpdatedBy: userId}, - } - appWorkflowMap, err = impl.appWorkflowRepository.SaveAppWorkflowMapping(appWorkflowMap, tx) + externalCiPipelineId, err := impl.CreateExternalCiAndAppWorkflowMapping(app.Id, savedAppWf.Id, userId, tx) if err != nil { + impl.logger.Errorw("error in creating new external ci pipeline and new app workflow mapping", "appId", app.Id, "err", err) return 0, err } - pipeline.ParentPipelineId = externalCiPipeline.Id + pipeline.ParentPipelineId = externalCiPipelineId 
pipeline.AppWorkflowId = savedAppWf.Id } @@ -1589,9 +1576,12 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a //TODO: mark as created in our db pipelineId, err := impl.ciCdPipelineOrchestrator.CreateCDPipelines(pipeline, app.Id, userId, tx, app.AppName) if err != nil { - impl.logger.Errorw("error in ") + impl.logger.Errorw("error in creating cd pipeline", "appId", app.Id, "pipeline", pipeline) return 0, err } + if pipeline.RefPipelineId > 0 { + pipeline.SourceToNewPipelineId[pipeline.RefPipelineId] = pipelineId + } //adding pipeline to workflow _, err = impl.appWorkflowRepository.FindByIdAndAppId(pipeline.AppWorkflowId, app.Id) @@ -1601,12 +1591,16 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a if pipeline.AppWorkflowId > 0 { var parentPipelineId int var parentPipelineType string + if pipeline.ParentPipelineId == 0 { parentPipelineId = pipeline.CiPipelineId parentPipelineType = "CI_PIPELINE" } else { parentPipelineId = pipeline.ParentPipelineId parentPipelineType = pipeline.ParentPipelineType + if pipeline.ParentPipelineType != appWorkflow.WEBHOOK && pipeline.RefPipelineId > 0 && len(pipeline.SourceToNewPipelineId) > 0 { + parentPipelineId = pipeline.SourceToNewPipelineId[pipeline.ParentPipelineId] + } } appWorkflowMap := &appWorkflow.AppWorkflowMapping{ AppWorkflowId: pipeline.AppWorkflowId, @@ -1982,3 +1976,30 @@ func (impl *CdPipelineConfigServiceImpl) BulkDeleteCdPipelines(impactedPipelines return respDtos } + +func (impl *CdPipelineConfigServiceImpl) CreateExternalCiAndAppWorkflowMapping(appId, appWorkflowId int, userId int32, tx *pg.Tx) (int, error) { + externalCiPipeline := &pipelineConfig.ExternalCiPipeline{ + AppId: appId, + AccessToken: "", + Active: true, + AuditLog: sql.AuditLog{CreatedBy: userId, CreatedOn: time.Now(), UpdatedOn: time.Now(), UpdatedBy: userId}, + } + externalCiPipeline, err := impl.ciPipelineRepository.SaveExternalCi(externalCiPipeline, tx) + if err != nil { + 
impl.logger.Errorw("error in saving external ci", "appId", appId, "err", err) + return 0, err + } + appWorkflowMap := &appWorkflow.AppWorkflowMapping{ + AppWorkflowId: appWorkflowId, + ComponentId: externalCiPipeline.Id, + Type: "WEBHOOK", + Active: true, + AuditLog: sql.AuditLog{CreatedBy: userId, CreatedOn: time.Now(), UpdatedOn: time.Now(), UpdatedBy: userId}, + } + appWorkflowMap, err = impl.appWorkflowRepository.SaveAppWorkflowMapping(appWorkflowMap, tx) + if err != nil { + impl.logger.Errorw("error in saving app workflow mapping for external ci", "appId", appId, "appWorkflowId", appWorkflowId, "externalCiPipelineId", externalCiPipeline.Id, "err", err) + return 0, err + } + return externalCiPipeline.Id, nil +} diff --git a/pkg/pipeline/PipelineStageService.go b/pkg/pipeline/PipelineStageService.go index 56ae6d9ef3..4952fce7c9 100644 --- a/pkg/pipeline/PipelineStageService.go +++ b/pkg/pipeline/PipelineStageService.go @@ -964,10 +964,12 @@ func (impl *PipelineStageServiceImpl) UpdatePipelineStage(stageReq *bean.Pipelin if err == pg.ErrNoRows || createNewPipStage { //no stage found, creating new stage stageReq.Id = 0 - err = impl.CreatePipelineStage(stageReq, stageType, pipelineId, userId) - if err != nil { - impl.logger.Errorw("error in creating new pipeline stage", "err", err, "pipelineStageReq", stageReq) - return err + if len(stageReq.Steps) > 0 { + err = impl.CreatePipelineStage(stageReq, stageType, pipelineId, userId) + if err != nil { + impl.logger.Errorw("error in creating new pipeline stage", "err", err, "pipelineStageReq", stageReq) + return err + } } } else { //stageId found, to handle as an update request @@ -2139,7 +2141,7 @@ func (impl *PipelineStageServiceImpl) fetchScopedVariablesAndResolveTemplate(unr return nil, err } parserResponse := impl.variableTemplateParser.ParseTemplate(parsers.VariableParserRequest{ - TemplateType: parsers.JsonVariableTemplate, + TemplateType: parsers.StringVariableTemplate, Template: string(responseJson), Variables: 
scopedVariables, IgnoreUnknownVariables: true, diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index adfd216bfc..5e1b6bc18e 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -28,7 +28,6 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" util2 "github.com/devtron-labs/devtron/internal/util" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/sql" "github.com/devtron-labs/devtron/util/event" @@ -71,7 +70,7 @@ type WebhookServiceImpl struct { eventFactory client.EventFactory workflowDagExecutor WorkflowDagExecutor ciHandler CiHandler - customTagService pkg.CustomTagService + customTagService CustomTagService } func NewWebhookServiceImpl( @@ -81,7 +80,7 @@ func NewWebhookServiceImpl( appService app.AppService, eventClient client.EventClient, eventFactory client.EventFactory, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, - customTagService pkg.CustomTagService, + customTagService CustomTagService, workflowDagExecutor WorkflowDagExecutor, ciHandler CiHandler) *WebhookServiceImpl { webhookHandler := &WebhookServiceImpl{ ciArtifactRepository: ciArtifactRepository, diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 67e032cc77..b7ed0a951e 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -20,23 +20,45 @@ package pipeline import ( "context" "encoding/json" + errors3 "errors" "fmt" + "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "github.com/aws/aws-sdk-go/service/autoscaling" blob_storage 
"github.com/devtron-labs/common-lib/blob-storage" util5 "github.com/devtron-labs/common-lib/utils/k8s" "github.com/devtron-labs/common-lib/utils/k8s/health" + client2 "github.com/devtron-labs/devtron/api/helm-app" + "github.com/devtron-labs/devtron/client/argocdServer" + application2 "github.com/devtron-labs/devtron/client/argocdServer/application" gitSensorClient "github.com/devtron-labs/devtron/client/gitSensor" "github.com/devtron-labs/devtron/pkg" + "github.com/devtron-labs/devtron/internal/middleware" + app2 "github.com/devtron-labs/devtron/internal/sql/repository/app" + bean4 "github.com/devtron-labs/devtron/pkg/app/bean" "github.com/devtron-labs/devtron/pkg/app/status" + "github.com/devtron-labs/devtron/pkg/chartRepo/repository" + "github.com/devtron-labs/devtron/pkg/dockerRegistry" "github.com/devtron-labs/devtron/pkg/k8s" bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" repository4 "github.com/devtron-labs/devtron/pkg/pipeline/repository" "github.com/devtron-labs/devtron/pkg/plugin" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/variables" + "github.com/devtron-labs/devtron/pkg/variables/parsers" repository5 "github.com/devtron-labs/devtron/pkg/variables/repository" util4 "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/argo" + errors2 "github.com/juju/errors" + "github.com/pkg/errors" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" "go.opentelemetry.io/otel" + "google.golang.org/grpc/codes" + 
status2 "google.golang.org/grpc/status" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/helm/pkg/proto/hapi/chart" + "path" "strconv" "strings" "time" @@ -122,8 +144,51 @@ type WorkflowDagExecutorImpl struct { globalPluginService plugin.GlobalPluginService variableSnapshotHistoryService variables.VariableSnapshotHistoryService pluginInputVariableParser PluginInputVariableParser + + deploymentTemplateHistoryService history2.DeploymentTemplateHistoryService + configMapHistoryService history2.ConfigMapHistoryService + pipelineStrategyHistoryService history2.PipelineStrategyHistoryService + manifestPushConfigRepository repository4.ManifestPushConfigRepository + gitOpsManifestPushService app.GitOpsPushService + ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository + imageScanHistoryRepository security.ImageScanHistoryRepository + imageScanDeployInfoRepository security.ImageScanDeployInfoRepository + appCrudOperationService app.AppCrudOperationService + pipelineConfigRepository chartConfig.PipelineConfigRepository + dockerRegistryIpsConfigService dockerRegistry.DockerRegistryIpsConfigService + chartRepository chartRepoRepository.ChartRepository + chartTemplateService util.ChartTemplateService + strategyHistoryRepository repository3.PipelineStrategyHistoryRepository + appRepository app2.AppRepository + deploymentTemplateHistoryRepository repository3.DeploymentTemplateHistoryRepository + argoK8sClient argocdServer.ArgoK8sClient + configMapRepository chartConfig.ConfigMapRepository + configMapHistoryRepository repository3.ConfigMapHistoryRepository + refChartDir chartRepoRepository.RefChartDir + helmAppService client2.HelmAppService + helmAppClient client2.HelmAppClient + chartRefRepository chartRepoRepository.ChartRefRepository + environmentConfigRepository chartConfig.EnvConfigOverrideRepository + appLevelMetricsRepository repository.AppLevelMetricsRepository + envLevelMetricsRepository repository.EnvLevelAppMetricsRepository + 
dbMigrationConfigRepository pipelineConfig.DbMigrationConfigRepository + mergeUtil *util.MergeUtil + gitOpsConfigRepository repository.GitOpsConfigRepository + gitFactory *util.GitFactory + acdClient application2.ServiceClient + variableEntityMappingService variables.VariableEntityMappingService + variableTemplateParser parsers.VariableTemplateParser + argoClientWrapperService argocdServer.ArgoClientWrapperService + scopedVariableService variables.ScopedVariableService } +const kedaAutoscaling = "kedaAutoscaling" +const horizontalPodAutoscaler = "HorizontalPodAutoscaler" +const fullnameOverride = "fullnameOverride" +const nameOverride = "nameOverride" +const enabled = "enabled" +const replicaCount = "replicaCount" + const ( GIT_COMMIT_HASH_PREFIX = "GIT_COMMIT_HASH" GIT_SOURCE_TYPE_PREFIX = "GIT_SOURCE_TYPE" @@ -206,6 +271,42 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi variableSnapshotHistoryService variables.VariableSnapshotHistoryService, globalPluginService plugin.GlobalPluginService, pluginInputVariableParser PluginInputVariableParser, + + deploymentTemplateHistoryService history2.DeploymentTemplateHistoryService, + configMapHistoryService history2.ConfigMapHistoryService, + pipelineStrategyHistoryService history2.PipelineStrategyHistoryService, + manifestPushConfigRepository repository4.ManifestPushConfigRepository, + gitOpsManifestPushService app.GitOpsPushService, + ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, + imageScanHistoryRepository security.ImageScanHistoryRepository, + imageScanDeployInfoRepository security.ImageScanDeployInfoRepository, + appCrudOperationService app.AppCrudOperationService, + pipelineConfigRepository chartConfig.PipelineConfigRepository, + dockerRegistryIpsConfigService dockerRegistry.DockerRegistryIpsConfigService, + chartRepository chartRepoRepository.ChartRepository, + chartTemplateService util.ChartTemplateService, + strategyHistoryRepository 
repository3.PipelineStrategyHistoryRepository, + appRepository app2.AppRepository, + deploymentTemplateHistoryRepository repository3.DeploymentTemplateHistoryRepository, + ArgoK8sClient argocdServer.ArgoK8sClient, + configMapRepository chartConfig.ConfigMapRepository, + configMapHistoryRepository repository3.ConfigMapHistoryRepository, + refChartDir chartRepoRepository.RefChartDir, + helmAppService client2.HelmAppService, + helmAppClient client2.HelmAppClient, + chartRefRepository chartRepoRepository.ChartRefRepository, + environmentConfigRepository chartConfig.EnvConfigOverrideRepository, + appLevelMetricsRepository repository.AppLevelMetricsRepository, + envLevelMetricsRepository repository.EnvLevelAppMetricsRepository, + dbMigrationConfigRepository pipelineConfig.DbMigrationConfigRepository, + mergeUtil *util.MergeUtil, + gitOpsConfigRepository repository.GitOpsConfigRepository, + gitFactory *util.GitFactory, + acdClient application2.ServiceClient, + variableEntityMappingService variables.VariableEntityMappingService, + variableTemplateParser parsers.VariableTemplateParser, + argoClientWrapperService argocdServer.ArgoClientWrapperService, + scopedVariableService variables.ScopedVariableService, ) *WorkflowDagExecutorImpl { wde := &WorkflowDagExecutorImpl{logger: Logger, pipelineRepository: pipelineRepository, @@ -242,6 +343,42 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi variableSnapshotHistoryService: variableSnapshotHistoryService, globalPluginService: globalPluginService, pluginInputVariableParser: pluginInputVariableParser, + + deploymentTemplateHistoryService: deploymentTemplateHistoryService, + configMapHistoryService: configMapHistoryService, + pipelineStrategyHistoryService: pipelineStrategyHistoryService, + manifestPushConfigRepository: manifestPushConfigRepository, + gitOpsManifestPushService: gitOpsManifestPushService, + ciPipelineMaterialRepository: ciPipelineMaterialRepository, + imageScanHistoryRepository: 
imageScanHistoryRepository, + imageScanDeployInfoRepository: imageScanDeployInfoRepository, + appCrudOperationService: appCrudOperationService, + pipelineConfigRepository: pipelineConfigRepository, + dockerRegistryIpsConfigService: dockerRegistryIpsConfigService, + chartRepository: chartRepository, + chartTemplateService: chartTemplateService, + strategyHistoryRepository: strategyHistoryRepository, + appRepository: appRepository, + deploymentTemplateHistoryRepository: deploymentTemplateHistoryRepository, + argoK8sClient: ArgoK8sClient, + configMapRepository: configMapRepository, + configMapHistoryRepository: configMapHistoryRepository, + refChartDir: refChartDir, + helmAppService: helmAppService, + helmAppClient: helmAppClient, + chartRefRepository: chartRefRepository, + environmentConfigRepository: environmentConfigRepository, + appLevelMetricsRepository: appLevelMetricsRepository, + envLevelMetricsRepository: envLevelMetricsRepository, + dbMigrationConfigRepository: dbMigrationConfigRepository, + mergeUtil: mergeUtil, + gitOpsConfigRepository: gitOpsConfigRepository, + gitFactory: gitFactory, + acdClient: acdClient, + variableEntityMappingService: variableEntityMappingService, + variableTemplateParser: variableTemplateParser, + argoClientWrapperService: argoClientWrapperService, + scopedVariableService: scopedVariableService, } config, err := GetCdConfig() if err != nil { @@ -961,6 +1098,7 @@ func (impl *WorkflowDagExecutorImpl) buildWFRequest(runner *pipelineConfig.CdWor impl.logger.Errorw("error in getting environment by id", "err", err) return nil, err } + if pipelineStage != nil { //Scope will pick the environment of CD pipeline irrespective of in-cluster mode, //since user sees the environment of the CD pipeline @@ -972,7 +1110,8 @@ func (impl *WorkflowDagExecutorImpl) buildWFRequest(runner *pipelineConfig.CdWor EnvironmentName: env.Name, ClusterName: env.Cluster.ClusterName, Namespace: env.Namespace, - ImageTag: artifact.Image, + Image: artifact.Image, + 
ImageTag: util3.GetImageTagFromImage(artifact.Image), }, } var variableSnapshot map[string]string @@ -1523,7 +1662,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerDeployment(cdWf *pipelineConfig.CdWo return nil } - err = impl.appService.TriggerCD(artifact, cdWf.Id, savedWfr.Id, pipeline, triggeredAt) + err = impl.TriggerCD(artifact, cdWf.Id, savedWfr.Id, pipeline, triggeredAt) err1 := impl.updatePreviousDeploymentStatus(runner, pipeline.Id, err, triggeredAt, triggeredBy) if err1 != nil || err != nil { impl.logger.Errorw("error while update previous cd workflow runners", "err", err, "runner", runner, "pipelineId", pipeline.Id) @@ -1796,7 +1935,7 @@ func (impl *WorkflowDagExecutorImpl) ManualCdTrigger(overrideRequest *bean.Value impl.logger.Errorf("invalid req", "err", err, "req", overrideRequest) return 0, err } - impl.appService.SetPipelineFieldsInOverrideRequest(overrideRequest, cdPipeline) + impl.SetPipelineFieldsInOverrideRequest(overrideRequest, cdPipeline) if overrideRequest.CdWorkflowType == bean.CD_WORKFLOW_TYPE_PRE { _, span = otel.Tracer("orchestrator").Start(ctx, "ciArtifactRepository.Get") @@ -1923,7 +2062,7 @@ func (impl *WorkflowDagExecutorImpl) ManualCdTrigger(overrideRequest *bean.Value return 0, fmt.Errorf("found vulnerability for image digest %s", artifact.ImageDigest) } _, span = otel.Tracer("orchestrator").Start(ctx, "appService.TriggerRelease") - releaseId, _, err = impl.appService.TriggerRelease(overrideRequest, ctx, triggeredAt, overrideRequest.UserId) + releaseId, _, err = impl.TriggerRelease(overrideRequest, ctx, triggeredAt, overrideRequest.UserId) span.End() if overrideRequest.DeploymentAppType == util.PIPELINE_DEPLOYMENT_TYPE_MANIFEST_DOWNLOAD { @@ -2182,3 +2321,1861 @@ func (impl *WorkflowDagExecutorImpl) buildACDContext() (acdContext context.Conte ctx = context.WithValue(ctx, "token", acdToken) return ctx, nil } + +func (impl *WorkflowDagExecutorImpl) TriggerRelease(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context, 
triggeredAt time.Time, deployedBy int32) (releaseNo int, manifest []byte, err error) { + triggerEvent := impl.GetTriggerEvent(overrideRequest.DeploymentAppType, triggeredAt, deployedBy) + releaseNo, manifest, err = impl.TriggerPipeline(overrideRequest, triggerEvent, ctx) + if err != nil { + return 0, manifest, err + } + return releaseNo, manifest, nil +} + +func (impl *WorkflowDagExecutorImpl) TriggerCD(artifact *repository.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, triggeredAt time.Time) error { + impl.logger.Debugw("automatic pipeline trigger attempt async", "artifactId", artifact.Id) + + return impl.triggerReleaseAsync(artifact, cdWorkflowId, wfrId, pipeline, triggeredAt) +} + +func (impl *WorkflowDagExecutorImpl) triggerReleaseAsync(artifact *repository.CiArtifact, cdWorkflowId, wfrId int, pipeline *pipelineConfig.Pipeline, triggeredAt time.Time) error { + err := impl.validateAndTrigger(pipeline, artifact, cdWorkflowId, wfrId, triggeredAt) + if err != nil { + impl.logger.Errorw("error in trigger for pipeline", "pipelineId", strconv.Itoa(pipeline.Id)) + } + impl.logger.Debugw("trigger attempted for all pipeline ", "artifactId", artifact.Id) + return err +} + +func (impl *WorkflowDagExecutorImpl) validateAndTrigger(p *pipelineConfig.Pipeline, artifact *repository.CiArtifact, cdWorkflowId, wfrId int, triggeredAt time.Time) error { + object := impl.enforcerUtil.GetAppRBACNameByAppId(p.AppId) + envApp := strings.Split(object, "/") + if len(envApp) != 2 { + impl.logger.Error("invalid req, app and env not found from rbac") + return errors.New("invalid req, app and env not found from rbac") + } + err := impl.releasePipeline(p, artifact, cdWorkflowId, wfrId, triggeredAt) + return err +} + +func (impl *WorkflowDagExecutorImpl) releasePipeline(pipeline *pipelineConfig.Pipeline, artifact *repository.CiArtifact, cdWorkflowId, wfrId int, triggeredAt time.Time) error { + impl.logger.Debugw("triggering release for ", "cdPipelineId", pipeline.Id, 
"artifactId", artifact.Id) + + pipeline, err := impl.pipelineRepository.FindById(pipeline.Id) + if err != nil { + impl.logger.Errorw("error in fetching pipeline by pipelineId", "err", err) + return err + } + + request := &bean.ValuesOverrideRequest{ + PipelineId: pipeline.Id, + UserId: artifact.CreatedBy, + CiArtifactId: artifact.Id, + AppId: pipeline.AppId, + CdWorkflowId: cdWorkflowId, + ForceTrigger: true, + DeploymentWithConfig: bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED, + WfrId: wfrId, + } + impl.SetPipelineFieldsInOverrideRequest(request, pipeline) + + ctx, err := impl.buildACDContext() + if err != nil { + impl.logger.Errorw("error in creating acd synch context", "pipelineId", pipeline.Id, "artifactId", artifact.Id, "err", err) + return err + } + //setting deployedBy as 1(system user) since case of auto trigger + id, _, err := impl.TriggerRelease(request, ctx, triggeredAt, 1) + if err != nil { + impl.logger.Errorw("error in auto cd pipeline trigger", "pipelineId", pipeline.Id, "artifactId", artifact.Id, "err", err) + } else { + impl.logger.Infow("pipeline successfully triggered ", "cdPipelineId", pipeline.Id, "artifactId", artifact.Id, "releaseId", id) + } + return err + +} + +func (impl *WorkflowDagExecutorImpl) SetPipelineFieldsInOverrideRequest(overrideRequest *bean.ValuesOverrideRequest, pipeline *pipelineConfig.Pipeline) { + overrideRequest.PipelineId = pipeline.Id + overrideRequest.PipelineName = pipeline.Name + overrideRequest.EnvId = pipeline.EnvironmentId + overrideRequest.EnvName = pipeline.Environment.Name + overrideRequest.ClusterId = pipeline.Environment.ClusterId + overrideRequest.AppId = pipeline.AppId + overrideRequest.AppName = pipeline.App.AppName + overrideRequest.DeploymentAppType = pipeline.DeploymentAppType +} + +func (impl *WorkflowDagExecutorImpl) GetTriggerEvent(deploymentAppType string, triggeredAt time.Time, deployedBy int32) bean.TriggerEvent { + // trigger event will decide whether to perform GitOps or deployment for a particular 
deployment app type + triggerEvent := bean.TriggerEvent{ + TriggeredBy: deployedBy, + TriggerdAt: triggeredAt, + } + switch deploymentAppType { + case bean2.ArgoCd: + triggerEvent.PerformChartPush = true + triggerEvent.PerformDeploymentOnCluster = true + triggerEvent.GetManifestInResponse = false + triggerEvent.DeploymentAppType = bean2.ArgoCd + triggerEvent.ManifestStorageType = bean2.ManifestStorageGit + case bean2.Helm: + triggerEvent.PerformChartPush = false + triggerEvent.PerformDeploymentOnCluster = true + triggerEvent.GetManifestInResponse = false + triggerEvent.DeploymentAppType = bean2.Helm + } + return triggerEvent +} + +// write integration/unit test for each function +func (impl *WorkflowDagExecutorImpl) TriggerPipeline(overrideRequest *bean.ValuesOverrideRequest, triggerEvent bean.TriggerEvent, ctx context.Context) (releaseNo int, manifest []byte, err error) { + + isRequestValid, err := impl.ValidateTriggerEvent(triggerEvent) + if !isRequestValid { + return releaseNo, manifest, err + } + + valuesOverrideResponse, builtChartPath, err := impl.BuildManifestForTrigger(overrideRequest, triggerEvent.TriggerdAt, ctx) + _, span := otel.Tracer("orchestrator").Start(ctx, "CreateHistoriesForDeploymentTrigger") + err1 := impl.CreateHistoriesForDeploymentTrigger(valuesOverrideResponse.Pipeline, valuesOverrideResponse.PipelineStrategy, valuesOverrideResponse.EnvOverride, triggerEvent.TriggerdAt, triggerEvent.TriggeredBy) + if err1 != nil { + impl.logger.Errorw("error in saving histories for trigger", "err", err1, "pipelineId", valuesOverrideResponse.Pipeline.Id, "wfrId", overrideRequest.WfrId) + } + span.End() + if err != nil { + return releaseNo, manifest, err + } + + if triggerEvent.PerformChartPush { + manifestPushTemplate, err := impl.BuildManifestPushTemplate(overrideRequest, valuesOverrideResponse, builtChartPath, &manifest) + if err != nil { + impl.logger.Errorw("error in building manifest push template", "err", err) + return releaseNo, manifest, err + } + 
manifestPushService := impl.GetManifestPushService(triggerEvent) + manifestPushResponse := manifestPushService.PushChart(manifestPushTemplate, ctx) + if manifestPushResponse.Error != nil { + impl.logger.Errorw("Error in pushing manifest to git", "err", err, "git_repo_url", manifestPushTemplate.RepoUrl) + return releaseNo, manifest, err + } + pipelineOverrideUpdateRequest := &chartConfig.PipelineOverride{ + Id: valuesOverrideResponse.PipelineOverride.Id, + GitHash: manifestPushResponse.CommitHash, + CommitTime: manifestPushResponse.CommitTime, + EnvConfigOverrideId: valuesOverrideResponse.EnvOverride.Id, + PipelineOverrideValues: valuesOverrideResponse.ReleaseOverrideJSON, + PipelineId: overrideRequest.PipelineId, + CiArtifactId: overrideRequest.CiArtifactId, + PipelineMergedValues: valuesOverrideResponse.MergedValues, + AuditLog: sql.AuditLog{UpdatedOn: triggerEvent.TriggerdAt, UpdatedBy: overrideRequest.UserId}, + } + _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineOverrideRepository.Update") + err = impl.pipelineOverrideRepository.Update(pipelineOverrideUpdateRequest) + span.End() + } + + if triggerEvent.PerformDeploymentOnCluster { + err = impl.DeployApp(overrideRequest, valuesOverrideResponse, triggerEvent.TriggerdAt, ctx) + if err != nil { + impl.logger.Errorw("error in deploying app", "err", err) + return releaseNo, manifest, err + } + } + + go impl.WriteCDTriggerEvent(overrideRequest, valuesOverrideResponse.Artifact, valuesOverrideResponse.PipelineOverride.PipelineReleaseCounter, valuesOverrideResponse.PipelineOverride.Id) + + _, spann := otel.Tracer("orchestrator").Start(ctx, "MarkImageScanDeployed") + _ = impl.MarkImageScanDeployed(overrideRequest.AppId, valuesOverrideResponse.EnvOverride.TargetEnvironment, valuesOverrideResponse.Artifact.ImageDigest, overrideRequest.ClusterId, valuesOverrideResponse.Artifact.ScanEnabled) + spann.End() + + middleware.CdTriggerCounter.WithLabelValues(overrideRequest.AppName, overrideRequest.EnvName).Inc() + + 
return valuesOverrideResponse.PipelineOverride.PipelineReleaseCounter, manifest, nil + +} + +func (impl *WorkflowDagExecutorImpl) ValidateTriggerEvent(triggerEvent bean.TriggerEvent) (bool, error) { + + switch triggerEvent.DeploymentAppType { + case bean2.ArgoCd: + if !triggerEvent.PerformChartPush { + return false, errors2.New("For deployment type ArgoCd, PerformChartPush flag expected value = true, got false") + } + case bean2.Helm: + return true, nil + case bean2.GitOpsWithoutDeployment: + if triggerEvent.PerformDeploymentOnCluster { + return false, errors2.New("For deployment type GitOpsWithoutDeployment, PerformDeploymentOnCluster flag expected value = false, got value = true") + } + case bean2.ManifestDownload: + if triggerEvent.PerformChartPush { + return false, errors3.New("For deployment type ManifestDownload, PerformChartPush flag expected value = false, got true") + } + if triggerEvent.PerformDeploymentOnCluster { + return false, errors3.New("For deployment type ManifestDownload, PerformDeploymentOnCluster flag expected value = false, got true") + } + } + return true, nil + +} + +func (impl *WorkflowDagExecutorImpl) BuildManifestForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (valuesOverrideResponse *app.ValuesOverrideResponse, builtChartPath string, err error) { + + valuesOverrideResponse = &app.ValuesOverrideResponse{} + valuesOverrideResponse, err = impl.GetValuesOverrideForTrigger(overrideRequest, triggeredAt, ctx) + if err != nil { + impl.logger.Errorw("error in fetching values for trigger", "err", err) + return valuesOverrideResponse, "", err + } + builtChartPath, err = impl.appService.BuildChartAndGetPath(overrideRequest.AppName, valuesOverrideResponse.EnvOverride, ctx) + if err != nil { + impl.logger.Errorw("error in parsing reference chart", "err", err) + return valuesOverrideResponse, "", err + } + return valuesOverrideResponse, builtChartPath, err +} + +func (impl *WorkflowDagExecutorImpl) 
CreateHistoriesForDeploymentTrigger(pipeline *pipelineConfig.Pipeline, strategy *chartConfig.PipelineStrategy, envOverride *chartConfig.EnvConfigOverride, deployedOn time.Time, deployedBy int32) error { + //creating history for deployment template + deploymentTemplateHistory, err := impl.deploymentTemplateHistoryService.CreateDeploymentTemplateHistoryForDeploymentTrigger(pipeline, envOverride, envOverride.Chart.ImageDescriptorTemplate, deployedOn, deployedBy) + if err != nil { + impl.logger.Errorw("error in creating deployment template history for deployment trigger", "err", err) + return err + } + err = impl.configMapHistoryService.CreateCMCSHistoryForDeploymentTrigger(pipeline, deployedOn, deployedBy) + if err != nil { + impl.logger.Errorw("error in creating CM/CS history for deployment trigger", "err", err) + return err + } + if strategy != nil { + err = impl.pipelineStrategyHistoryService.CreateStrategyHistoryForDeploymentTrigger(strategy, deployedOn, deployedBy, pipeline.TriggerType) + if err != nil { + impl.logger.Errorw("error in creating strategy history for deployment trigger", "err", err) + return err + } + } + //VARIABLE_SNAPSHOT_SAVE + if envOverride.VariableSnapshot != nil && len(envOverride.VariableSnapshot) > 0 { + variableMapBytes, _ := json.Marshal(envOverride.VariableSnapshot) + variableSnapshotHistory := &repository5.VariableSnapshotHistoryBean{ + VariableSnapshot: variableMapBytes, + HistoryReference: repository5.HistoryReference{ + HistoryReferenceId: deploymentTemplateHistory.Id, + HistoryReferenceType: repository5.HistoryReferenceTypeDeploymentTemplate, + }, + } + err = impl.variableSnapshotHistoryService.SaveVariableHistoriesForTrigger([]*repository5.VariableSnapshotHistoryBean{variableSnapshotHistory}, deployedBy) + if err != nil { + return err + } + } + return nil +} + +func (impl *WorkflowDagExecutorImpl) BuildManifestPushTemplate(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *app.ValuesOverrideResponse, 
builtChartPath string, manifest *[]byte) (*bean4.ManifestPushTemplate, error) { + + manifestPushTemplate := &bean4.ManifestPushTemplate{ + WorkflowRunnerId: overrideRequest.WfrId, + AppId: overrideRequest.AppId, + ChartRefId: valuesOverrideResponse.EnvOverride.Chart.ChartRefId, + EnvironmentId: valuesOverrideResponse.EnvOverride.Environment.Id, + UserId: overrideRequest.UserId, + PipelineOverrideId: valuesOverrideResponse.PipelineOverride.Id, + AppName: overrideRequest.AppName, + TargetEnvironmentName: valuesOverrideResponse.EnvOverride.TargetEnvironment, + BuiltChartPath: builtChartPath, + BuiltChartBytes: manifest, + MergedValues: valuesOverrideResponse.MergedValues, + } + + manifestPushConfig, err := impl.manifestPushConfigRepository.GetManifestPushConfigByAppIdAndEnvId(overrideRequest.AppId, overrideRequest.EnvId) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching manifest push config from db", "err", err) + return manifestPushTemplate, err + } + + if manifestPushConfig != nil { + if manifestPushConfig.StorageType == bean2.ManifestStorageGit { + // need to implement for git repo push + // currently manifest push config doesn't have git push config. 
Gitops config is derived from charts, chart_env_config_override and chart_ref table + } + } else { + manifestPushTemplate.ChartReferenceTemplate = valuesOverrideResponse.EnvOverride.Chart.ReferenceTemplate + manifestPushTemplate.ChartName = valuesOverrideResponse.EnvOverride.Chart.ChartName + manifestPushTemplate.ChartVersion = valuesOverrideResponse.EnvOverride.Chart.ChartVersion + manifestPushTemplate.ChartLocation = valuesOverrideResponse.EnvOverride.Chart.ChartLocation + manifestPushTemplate.RepoUrl = valuesOverrideResponse.EnvOverride.Chart.GitRepoUrl + } + return manifestPushTemplate, err +} + +func (impl *WorkflowDagExecutorImpl) GetManifestPushService(triggerEvent bean.TriggerEvent) app.ManifestPushService { + var manifestPushService app.ManifestPushService + if triggerEvent.ManifestStorageType == bean2.ManifestStorageGit { + manifestPushService = impl.gitOpsManifestPushService + } + return manifestPushService +} + +func (impl *WorkflowDagExecutorImpl) DeployApp(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *app.ValuesOverrideResponse, triggeredAt time.Time, ctx context.Context) error { + + if util.IsAcdApp(overrideRequest.DeploymentAppType) { + _, span := otel.Tracer("orchestrator").Start(ctx, "DeployArgocdApp") + err := impl.DeployArgocdApp(overrideRequest, valuesOverrideResponse, ctx) + span.End() + if err != nil { + impl.logger.Errorw("error in deploying app on argocd", "err", err) + return err + } + } else if util.IsHelmApp(overrideRequest.DeploymentAppType) { + _, span := otel.Tracer("orchestrator").Start(ctx, "createHelmAppForCdPipeline") + _, err := impl.createHelmAppForCdPipeline(overrideRequest, valuesOverrideResponse, triggeredAt, ctx) + span.End() + if err != nil { + impl.logger.Errorw("error in creating or updating helm application for cd pipeline", "err", err) + return err + } + } + return nil +} + +func (impl *WorkflowDagExecutorImpl) WriteCDTriggerEvent(overrideRequest *bean.ValuesOverrideRequest, artifact 
*repository.CiArtifact, releaseId, pipelineOverrideId int) { + + event := impl.eventFactory.Build(util2.Trigger, &overrideRequest.PipelineId, overrideRequest.AppId, &overrideRequest.EnvId, util2.CD) + impl.logger.Debugw("event WriteCDTriggerEvent", "event", event) + event = impl.eventFactory.BuildExtraCDData(event, nil, pipelineOverrideId, bean.CD_WORKFLOW_TYPE_DEPLOY) + _, evtErr := impl.eventClient.WriteNotificationEvent(event) + if evtErr != nil { + impl.logger.Errorw("CD trigger event not sent", "error", evtErr) + } + deploymentEvent := app.DeploymentEvent{ + ApplicationId: overrideRequest.AppId, + EnvironmentId: overrideRequest.EnvId, //check for production Environment + ReleaseId: releaseId, + PipelineOverrideId: pipelineOverrideId, + TriggerTime: time.Now(), + CiArtifactId: overrideRequest.CiArtifactId, + } + ciPipelineMaterials, err := impl.ciPipelineMaterialRepository.GetByPipelineId(artifact.PipelineId) + if err != nil { + impl.logger.Errorw("error in fetching ci pipeline materials by pipeline id", "pipelineId", artifact.PipelineId, "err", err) + } + materialInfoMap, mErr := artifact.ParseMaterialInfo() + if mErr != nil { + impl.logger.Errorw("material info map error", "err", mErr) + return + } + for _, ciPipelineMaterial := range ciPipelineMaterials { + hash := materialInfoMap[ciPipelineMaterial.GitMaterial.Url] + pipelineMaterialInfo := &app.PipelineMaterialInfo{PipelineMaterialId: ciPipelineMaterial.Id, CommitHash: hash} + deploymentEvent.PipelineMaterials = append(deploymentEvent.PipelineMaterials, pipelineMaterialInfo) + } + impl.logger.Infow("triggering deployment event", "event", deploymentEvent) + err = impl.eventClient.WriteNatsEvent(pubsub.CD_SUCCESS, deploymentEvent) + if err != nil { + impl.logger.Errorw("error in writing cd trigger event", "err", err) + } +} + +func (impl *WorkflowDagExecutorImpl) MarkImageScanDeployed(appId int, envId int, imageDigest string, clusterId int, isScanEnabled bool) error { + impl.logger.Debugw("mark image scan deployed for normal app, from cd auto or manual trigger", "imageDigest", imageDigest) + 
executionHistory, err := impl.imageScanHistoryRepository.FindByImageDigest(imageDigest) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching execution history", "err", err) + return err + } + if executionHistory == nil || executionHistory.Id == 0 { + impl.logger.Errorw("no execution history found for digest", "digest", imageDigest) + return fmt.Errorf("no execution history found for digest - %s", imageDigest) + } + impl.logger.Debugw("mark image scan deployed for normal app, from cd auto or manual trigger", "executionHistory", executionHistory) + var ids []int + ids = append(ids, executionHistory.Id) + + ot, err := impl.imageScanDeployInfoRepository.FetchByAppIdAndEnvId(appId, envId, []string{security.ScanObjectType_APP}) + + if err == pg.ErrNoRows && !isScanEnabled { + //ignoring if no rows are found and scan is disabled + return nil + } + + if err != nil && err != pg.ErrNoRows { + return err + } else if err == pg.ErrNoRows && isScanEnabled { + imageScanDeployInfo := &security.ImageScanDeployInfo{ + ImageScanExecutionHistoryId: ids, + ScanObjectMetaId: appId, + ObjectType: security.ScanObjectType_APP, + EnvId: envId, + ClusterId: clusterId, + AuditLog: sql.AuditLog{ + CreatedOn: time.Now(), + CreatedBy: 1, + UpdatedOn: time.Now(), + UpdatedBy: 1, + }, + } + impl.logger.Debugw("mark image scan deployed for normal app, from cd auto or manual trigger", "imageScanDeployInfo", imageScanDeployInfo) + err = impl.imageScanDeployInfoRepository.Save(imageScanDeployInfo) + if err != nil { + impl.logger.Errorw("error in creating deploy info", "err", err) + } + } else { + // Updating Execution history for Latest Deployment to fetch out security Vulnerabilities for latest deployed info + if isScanEnabled { + ot.ImageScanExecutionHistoryId = ids + } else { + arr := []int{-1} + ot.ImageScanExecutionHistoryId = arr + } + err = impl.imageScanDeployInfoRepository.Update(ot) + if err != nil { + impl.logger.Errorw("error in updating deploy info for latest 
deployed image", "err", err) + } + } + return err +} + +func (impl *WorkflowDagExecutorImpl) GetValuesOverrideForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*app.ValuesOverrideResponse, error) { + if overrideRequest.DeploymentType == models.DEPLOYMENTTYPE_UNKNOWN { + overrideRequest.DeploymentType = models.DEPLOYMENTTYPE_DEPLOY + } + if len(overrideRequest.DeploymentWithConfig) == 0 { + overrideRequest.DeploymentWithConfig = bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED + } + valuesOverrideResponse := &app.ValuesOverrideResponse{} + + pipeline, err := impl.pipelineRepository.FindById(overrideRequest.PipelineId) + valuesOverrideResponse.Pipeline = pipeline + if err != nil { + impl.logger.Errorw("error in fetching pipeline by pipeline id", "err", err, "pipeline-id-", overrideRequest.PipelineId) + return valuesOverrideResponse, err + } + + _, span := otel.Tracer("orchestrator").Start(ctx, "ciArtifactRepository.Get") + artifact, err := impl.ciArtifactRepository.Get(overrideRequest.CiArtifactId) + valuesOverrideResponse.Artifact = artifact + span.End() + if err != nil { + return valuesOverrideResponse, err + } + overrideRequest.Image = artifact.Image + + strategy, err := impl.GetDeploymentStrategyByTriggerType(overrideRequest, ctx) + valuesOverrideResponse.PipelineStrategy = strategy + if err != nil { + impl.logger.Errorw("error in getting strategy by trigger type", "err", err) + return valuesOverrideResponse, err + } + + envOverride, err := impl.GetEnvOverrideByTriggerType(overrideRequest, triggeredAt, ctx) + valuesOverrideResponse.EnvOverride = envOverride + if err != nil { + impl.logger.Errorw("error in getting env override by trigger type", "err", err) + return valuesOverrideResponse, err + } + appMetrics, err := impl.GetAppMetricsByTriggerType(overrideRequest, ctx) + valuesOverrideResponse.AppMetrics = appMetrics + if err != nil { + impl.logger.Errorw("error in getting app metrics by trigger type", "err", err) + return 
valuesOverrideResponse, err + } + + _, span = otel.Tracer("orchestrator").Start(ctx, "getDbMigrationOverride") + //FIXME: how to determine rollback + //we can't depend on ciArtifact ID because CI pipeline can be manually triggered in any order regardless of sourcecode status + dbMigrationOverride, err := impl.getDbMigrationOverride(overrideRequest, artifact, false) + span.End() + if err != nil { + impl.logger.Errorw("error in fetching db migration config", "req", overrideRequest, "err", err) + return valuesOverrideResponse, err + } + chartVersion := envOverride.Chart.ChartVersion + _, span = otel.Tracer("orchestrator").Start(ctx, "getConfigMapAndSecretJsonV2") + configMapJson, err := impl.getConfigMapAndSecretJsonV2(overrideRequest.AppId, envOverride.TargetEnvironment, overrideRequest.PipelineId, chartVersion, overrideRequest.DeploymentWithConfig, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + span.End() + if err != nil { + impl.logger.Errorw("error in fetching config map n secret ", "err", err) + configMapJson = nil + } + _, span = otel.Tracer("orchestrator").Start(ctx, "appCrudOperationService.GetLabelsByAppIdForDeployment") + appLabelJsonByte, err := impl.appCrudOperationService.GetLabelsByAppIdForDeployment(overrideRequest.AppId) + span.End() + if err != nil { + impl.logger.Errorw("error in fetching app labels for gitOps commit", "err", err) + appLabelJsonByte = nil + } + _, span = otel.Tracer("orchestrator").Start(ctx, "mergeAndSave") + pipelineOverride, err := impl.savePipelineOverride(overrideRequest, envOverride.Id, triggeredAt) + valuesOverrideResponse.PipelineOverride = pipelineOverride + if err != nil { + return valuesOverrideResponse, err + } + //TODO: check status and apply lock + releaseOverrideJson, err := impl.getReleaseOverride(envOverride, overrideRequest, artifact, pipelineOverride, strategy, &appMetrics) + valuesOverrideResponse.ReleaseOverrideJSON = releaseOverrideJson + if err != nil { + return valuesOverrideResponse, err + } + 
mergedValues, err := impl.mergeOverrideValues(envOverride, dbMigrationOverride, releaseOverrideJson, configMapJson, appLabelJsonByte, strategy) + + appName := fmt.Sprintf("%s-%s", overrideRequest.AppName, envOverride.Environment.Name) + mergedValues = impl.autoscalingCheckBeforeTrigger(ctx, appName, envOverride.Namespace, mergedValues, overrideRequest) + + _, span = otel.Tracer("orchestrator").Start(ctx, "dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment") + // handle image pull secret if access given + mergedValues, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(envOverride.Environment, pipeline.CiPipelineId, mergedValues) + valuesOverrideResponse.MergedValues = string(mergedValues) + span.End() + if err != nil { + return valuesOverrideResponse, err + } + pipelineOverride.PipelineMergedValues = string(mergedValues) + err = impl.pipelineOverrideRepository.Update(pipelineOverride) + if err != nil { + return valuesOverrideResponse, err + } + return valuesOverrideResponse, err +} + +func (impl *WorkflowDagExecutorImpl) DeployArgocdApp(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse *app.ValuesOverrideResponse, ctx context.Context) error { + + impl.logger.Debugw("new pipeline found", "pipeline", valuesOverrideResponse.Pipeline) + _, span := otel.Tracer("orchestrator").Start(ctx, "createArgoApplicationIfRequired") + name, err := impl.createArgoApplicationIfRequired(overrideRequest.AppId, valuesOverrideResponse.EnvOverride, valuesOverrideResponse.Pipeline, overrideRequest.UserId) + span.End() + if err != nil { + impl.logger.Errorw("acd application create error on cd trigger", "err", err, "req", overrideRequest) + return err + } + impl.logger.Debugw("argocd application created", "name", name) + + _, span = otel.Tracer("orchestrator").Start(ctx, "updateArgoPipeline") + updateAppInArgocd, err := impl.updateArgoPipeline(overrideRequest.AppId, valuesOverrideResponse.Pipeline.Name, 
valuesOverrideResponse.EnvOverride, ctx) + span.End() + if err != nil { + impl.logger.Errorw("error in updating argocd app ", "err", err) + return err + } + if updateAppInArgocd { + impl.logger.Debug("argo-cd successfully updated") + } else { + impl.logger.Debug("argo-cd failed to update, ignoring it") + } + return nil +} +func (impl *WorkflowDagExecutorImpl) createArgoApplicationIfRequired(appId int, envConfigOverride *chartConfig.EnvConfigOverride, pipeline *pipelineConfig.Pipeline, userId int32) (string, error) { + //repo has been registered while helm create + chart, err := impl.chartRepository.FindLatestChartForAppByAppId(appId) + if err != nil { + impl.logger.Errorw("no chart found ", "app", appId) + return "", err + } + envModel, err := impl.envRepository.FindById(envConfigOverride.TargetEnvironment) + if err != nil { + return "", err + } + argoAppName := pipeline.DeploymentAppName + if pipeline.DeploymentAppCreated { + return argoAppName, nil + } else { + //create + appNamespace := envConfigOverride.Namespace + if appNamespace == "" { + appNamespace = "default" + } + namespace := argocdServer.DevtronInstalationNs + appRequest := &argocdServer.AppTemplate{ + ApplicationName: argoAppName, + Namespace: namespace, + TargetNamespace: appNamespace, + TargetServer: envModel.Cluster.ServerUrl, + Project: "default", + ValuesFile: impl.getValuesFileForEnv(envModel.Id), + RepoPath: chart.ChartLocation, + RepoUrl: chart.GitRepoUrl, + } + + argoAppName, err := impl.argoK8sClient.CreateAcdApp(appRequest, envModel.Cluster) + if err != nil { + return "", err + } + //update cd pipeline to mark deployment app created + _, err = impl.updatePipeline(pipeline, userId) + if err != nil { + impl.logger.Errorw("error in update cd pipeline for deployment app created or not", "err", err) + return "", err + } + return argoAppName, nil + } +} + +func (impl *WorkflowDagExecutorImpl) createHelmAppForCdPipeline(overrideRequest *bean.ValuesOverrideRequest, valuesOverrideResponse 
*app.ValuesOverrideResponse, triggeredAt time.Time, ctx context.Context) (bool, error) { + + pipeline := valuesOverrideResponse.Pipeline + envOverride := valuesOverrideResponse.EnvOverride + mergeAndSave := valuesOverrideResponse.MergedValues + + chartMetaData := &chart.Metadata{ + Name: pipeline.App.AppName, + Version: envOverride.Chart.ChartVersion, + } + referenceTemplatePath := path.Join(string(impl.refChartDir), envOverride.Chart.ReferenceTemplate) + + if util.IsHelmApp(pipeline.DeploymentAppType) { + referenceChartByte := envOverride.Chart.ReferenceChart + // here updating reference chart into database. + if len(envOverride.Chart.ReferenceChart) == 0 { + refChartByte, err := impl.chartTemplateService.GetByteArrayRefChart(chartMetaData, referenceTemplatePath) + if err != nil { + impl.logger.Errorw("ref chart commit error on cd trigger", "err", err, "req", overrideRequest) + return false, err + } + ch := envOverride.Chart + ch.ReferenceChart = refChartByte + ch.UpdatedOn = time.Now() + ch.UpdatedBy = overrideRequest.UserId + err = impl.chartRepository.Update(ch) + if err != nil { + impl.logger.Errorw("chart update error", "err", err, "req", overrideRequest) + return false, err + } + referenceChartByte = refChartByte + } + + releaseName := pipeline.DeploymentAppName + cluster := envOverride.Environment.Cluster + bearerToken := cluster.Config[util5.BearerToken] + clusterConfig := &client2.ClusterConfig{ + ClusterName: cluster.ClusterName, + Token: bearerToken, + ApiServerUrl: cluster.ServerUrl, + InsecureSkipTLSVerify: cluster.InsecureSkipTlsVerify, + } + if cluster.InsecureSkipTlsVerify == false { + clusterConfig.KeyData = cluster.Config[util5.TlsKey] + clusterConfig.CertData = cluster.Config[util5.CertData] + clusterConfig.CaData = cluster.Config[util5.CertificateAuthorityData] + } + releaseIdentifier := &client2.ReleaseIdentifier{ + ReleaseName: releaseName, + ReleaseNamespace: envOverride.Namespace, + ClusterConfig: clusterConfig, + } + + if 
pipeline.DeploymentAppCreated { + req := &client2.UpgradeReleaseRequest{ + ReleaseIdentifier: releaseIdentifier, + ValuesYaml: mergeAndSave, + HistoryMax: impl.helmAppService.GetRevisionHistoryMaxValue(client2.SOURCE_DEVTRON_APP), + ChartContent: &client2.ChartContent{Content: referenceChartByte}, + } + + updateApplicationResponse, err := impl.helmAppClient.UpdateApplication(ctx, req) + + // For cases where helm release was not found but db flag for deployment app created was true + if err != nil && strings.Contains(err.Error(), "release: not found") { + + // retry install + _, err = impl.helmInstallReleaseWithCustomChart(ctx, releaseIdentifier, referenceChartByte, mergeAndSave) + + // if retry failed, return + if err != nil { + impl.logger.Errorw("release not found, failed to re-install helm application", "err", err) + return false, err + } + } else if err != nil { + impl.logger.Errorw("error in updating helm application for cd pipeline", "err", err) + return false, err + } else { + impl.logger.Debugw("updated helm application", "response", updateApplicationResponse, "isSuccess", updateApplicationResponse.Success) + } + + } else { + + helmResponse, err := impl.helmInstallReleaseWithCustomChart(ctx, releaseIdentifier, referenceChartByte, mergeAndSave) + + // For connection related errors, no need to update the db + if err != nil && strings.Contains(err.Error(), "connection error") { + impl.logger.Errorw("error in helm install custom chart", "err", err) + return false, err + } + + // IMP: update cd pipeline to mark deployment app created, even if helm install fails + // If the helm install fails, it still creates the app in failed state, so trying to + // re-create the app results in error from helm that cannot re-use name which is still in use + _, pgErr := impl.updatePipeline(pipeline, overrideRequest.UserId) + + if err != nil { + impl.logger.Errorw("error in helm install custom chart", "err", err) + + if pgErr != nil { + impl.logger.Errorw("failed to update 
deployment app created flag in pipeline table", "err", pgErr) + } + return false, err + } + + if pgErr != nil { + impl.logger.Errorw("failed to update deployment app created flag in pipeline table", "err", pgErr) + return false, pgErr + } + + impl.logger.Debugw("received helm release response", "helmResponse", helmResponse, "isSuccess", helmResponse.Success) + } + + //update workflow runner status, used in app workflow view + cdWf, err := impl.cdWorkflowRepository.FindByWorkflowIdAndRunnerType(ctx, overrideRequest.CdWorkflowId, bean.CD_WORKFLOW_TYPE_DEPLOY) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("err on fetching cd workflow", "err", err) + return false, err + } + cdWorkflowId := cdWf.CdWorkflowId + if cdWf.CdWorkflowId == 0 { + cdWf := &pipelineConfig.CdWorkflow{ + CiArtifactId: overrideRequest.CiArtifactId, + PipelineId: overrideRequest.PipelineId, + AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, + } + err := impl.cdWorkflowRepository.SaveWorkFlow(ctx, cdWf) + if err != nil { + impl.logger.Errorw("err on updating cd workflow for status update", "err", err) + return false, err + } + cdWorkflowId = cdWf.Id + runner := &pipelineConfig.CdWorkflowRunner{ + Id: cdWf.Id, + Name: pipeline.Name, + WorkflowType: bean.CD_WORKFLOW_TYPE_DEPLOY, + ExecutorType: pipelineConfig.WORKFLOW_EXECUTOR_TYPE_AWF, + Status: pipelineConfig.WorkflowInProgress, + TriggeredBy: overrideRequest.UserId, + StartedOn: triggeredAt, + CdWorkflowId: cdWorkflowId, + AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, + } + _, err = impl.cdWorkflowRepository.SaveWorkFlowRunner(runner) + if err != nil { + impl.logger.Errorw("err on updating cd workflow runner for status update", "err", err) + return false, err + } + } else { + cdWf.Status = pipelineConfig.WorkflowInProgress + cdWf.FinishedOn = time.Now() 
+ cdWf.UpdatedBy = overrideRequest.UserId + cdWf.UpdatedOn = time.Now() + err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(&cdWf) + if err != nil { + impl.logger.Errorw("error on update cd workflow runner", "cdWf", cdWf, "err", err) + return false, err + } + } + } + return true, nil +} + +func (impl *WorkflowDagExecutorImpl) GetDeploymentStrategyByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (*chartConfig.PipelineStrategy, error) { + + strategy := &chartConfig.PipelineStrategy{} + var err error + if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { + _, span := otel.Tracer("orchestrator").Start(ctx, "strategyHistoryRepository.GetHistoryByPipelineIdAndWfrId") + strategyHistory, err := impl.strategyHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + span.End() + if err != nil { + impl.logger.Errorw("error in getting deployed strategy history by pipleinId and wfrId", "err", err, "pipelineId", overrideRequest.PipelineId, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + return nil, err + } + strategy.Strategy = strategyHistory.Strategy + strategy.Config = strategyHistory.Config + strategy.PipelineId = overrideRequest.PipelineId + } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { + if overrideRequest.ForceTrigger { + _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.GetDefaultStrategyByPipelineId") + strategy, err = impl.pipelineConfigRepository.GetDefaultStrategyByPipelineId(overrideRequest.PipelineId) + span.End() + } else { + var deploymentTemplate chartRepoRepository.DeploymentStrategy + if overrideRequest.DeploymentTemplate == "ROLLING" { + deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_ROLLING + } else if overrideRequest.DeploymentTemplate == "BLUE-GREEN" { + deploymentTemplate = 
chartRepoRepository.DEPLOYMENT_STRATEGY_BLUE_GREEN + } else if overrideRequest.DeploymentTemplate == "CANARY" { + deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_CANARY + } else if overrideRequest.DeploymentTemplate == "RECREATE" { + deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_RECREATE + } + + if len(deploymentTemplate) > 0 { + _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.FindByStrategyAndPipelineId") + strategy, err = impl.pipelineConfigRepository.FindByStrategyAndPipelineId(deploymentTemplate, overrideRequest.PipelineId) + span.End() + } else { + _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.GetDefaultStrategyByPipelineId") + strategy, err = impl.pipelineConfigRepository.GetDefaultStrategyByPipelineId(overrideRequest.PipelineId) + span.End() + } + } + if err != nil && errors2.IsNotFound(err) == false { + impl.logger.Errorw("invalid state", "err", err, "req", strategy) + return nil, err + } + } + return strategy, nil +} + +func (impl *WorkflowDagExecutorImpl) GetEnvOverrideByTriggerType(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*chartConfig.EnvConfigOverride, error) { + + envOverride := &chartConfig.EnvConfigOverride{} + + var err error + if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { + _, span := otel.Tracer("orchestrator").Start(ctx, "deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId") + deploymentTemplateHistory, err := impl.deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + //VARIABLE_SNAPSHOT_GET and resolve + + span.End() + if err != nil { + impl.logger.Errorw("error in getting deployed deployment template history by pipelineId and wfrId", "err", err, "pipelineId", overrideRequest.PipelineId, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + return nil, err 
+ } + templateName := deploymentTemplateHistory.TemplateName + templateVersion := deploymentTemplateHistory.TemplateVersion + if templateName == "Rollout Deployment" { + templateName = "" + } + //getting chart_ref by id + _, span = otel.Tracer("orchestrator").Start(ctx, "chartRefRepository.FindByVersionAndName") + chartRef, err := impl.chartRefRepository.FindByVersionAndName(templateName, templateVersion) + span.End() + if err != nil { + impl.logger.Errorw("error in getting chartRef by version and name", "err", err, "version", templateVersion, "name", templateName) + return nil, err + } + //assuming that if a chartVersion is deployed then it's envConfigOverride will be available + _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.GetByAppIdEnvIdAndChartRefId") + envOverride, err = impl.environmentConfigRepository.GetByAppIdEnvIdAndChartRefId(overrideRequest.AppId, overrideRequest.EnvId, chartRef.Id) + span.End() + if err != nil { + impl.logger.Errorw("error in getting envConfigOverride for pipeline for specific chartVersion", "err", err, "appId", overrideRequest.AppId, "envId", overrideRequest.EnvId, "chartRefId", chartRef.Id) + return nil, err + } + + _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") + env, err := impl.envRepository.FindById(envOverride.TargetEnvironment) + span.End() + if err != nil { + impl.logger.Errorw("unable to find env", "err", err) + return nil, err + } + envOverride.Environment = env + + //updating historical data in envConfigOverride and appMetrics flag + envOverride.IsOverride = true + envOverride.EnvOverrideValues = deploymentTemplateHistory.Template + + resolvedTemplate, variableMap, err := impl.getResolvedTemplateWithSnapshot(deploymentTemplateHistory.Id, envOverride.EnvOverrideValues) + envOverride.ResolvedEnvOverrideValues = resolvedTemplate + envOverride.VariableSnapshot = variableMap + if err != nil { + return envOverride, err + } + } else if 
overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { + _, span := otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.ActiveEnvConfigOverride") + envOverride, err = impl.environmentConfigRepository.ActiveEnvConfigOverride(overrideRequest.AppId, overrideRequest.EnvId) + + var chart *chartRepoRepository.Chart + span.End() + if err != nil { + impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) + return nil, err + } + if envOverride.Id == 0 { + _, span = otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindLatestChartForAppByAppId") + chart, err = impl.chartRepository.FindLatestChartForAppByAppId(overrideRequest.AppId) + span.End() + if err != nil { + impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) + return nil, err + } + _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.FindChartByAppIdAndEnvIdAndChartRefId") + envOverride, err = impl.environmentConfigRepository.FindChartByAppIdAndEnvIdAndChartRefId(overrideRequest.AppId, overrideRequest.EnvId, chart.ChartRefId) + span.End() + if err != nil && !errors2.IsNotFound(err) { + impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) + return nil, err + } + + //creating new env override config + if errors2.IsNotFound(err) || envOverride == nil { + _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") + environment, err := impl.envRepository.FindById(overrideRequest.EnvId) + span.End() + if err != nil && !util.IsErrNoRows(err) { + return nil, err + } + envOverride = &chartConfig.EnvConfigOverride{ + Active: true, + ManualReviewed: true, + Status: models.CHARTSTATUS_SUCCESS, + TargetEnvironment: overrideRequest.EnvId, + ChartId: chart.Id, + AuditLog: sql.AuditLog{UpdatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId}, + Namespace: environment.Namespace, + IsOverride: false, + EnvOverrideValues: "{}", + 
Latest: false, + IsBasicViewLocked: chart.IsBasicViewLocked, + CurrentViewEditor: chart.CurrentViewEditor, + } + _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.Save") + err = impl.environmentConfigRepository.Save(envOverride) + span.End() + if err != nil { + impl.logger.Errorw("error in creating envconfig", "data", envOverride, "error", err) + return nil, err + } + } + envOverride.Chart = chart + } else if envOverride.Id > 0 && !envOverride.IsOverride { + _, span = otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindLatestChartForAppByAppId") + chart, err = impl.chartRepository.FindLatestChartForAppByAppId(overrideRequest.AppId) + span.End() + if err != nil { + impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) + return nil, err + } + envOverride.Chart = chart + } + + _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") + env, err := impl.envRepository.FindById(envOverride.TargetEnvironment) + span.End() + if err != nil { + impl.logger.Errorw("unable to find env", "err", err) + return nil, err + } + envOverride.Environment = env + + //VARIABLE different cases for variable resolution + scope := resourceQualifiers.Scope{ + AppId: overrideRequest.AppId, + EnvId: overrideRequest.EnvId, + ClusterId: overrideRequest.ClusterId, + SystemMetadata: &resourceQualifiers.SystemMetadata{ + EnvironmentName: env.Name, + ClusterName: env.Cluster.ClusterName, + Namespace: env.Namespace, + AppName: overrideRequest.AppName, + Image: overrideRequest.Image, + ImageTag: util3.GetImageTagFromImage(overrideRequest.Image), + }, + } + + if envOverride.IsOverride { + + resolvedTemplate, variableMap, err := impl.extractVariablesAndResolveTemplate(scope, envOverride.EnvOverrideValues, repository5.Entity{ + EntityType: repository5.EntityTypeDeploymentTemplateEnvLevel, + EntityId: envOverride.Id, + }) + envOverride.ResolvedEnvOverrideValues = resolvedTemplate + envOverride.VariableSnapshot = variableMap + if err != 
nil { + return envOverride, err + } + + } else { + resolvedTemplate, variableMap, err := impl.extractVariablesAndResolveTemplate(scope, chart.GlobalOverride, repository5.Entity{ + EntityType: repository5.EntityTypeDeploymentTemplateAppLevel, + EntityId: chart.Id, + }) + envOverride.Chart.ResolvedGlobalOverride = resolvedTemplate + envOverride.VariableSnapshot = variableMap + if err != nil { + return envOverride, err + } + + } + } + + return envOverride, nil +} + +func (impl *WorkflowDagExecutorImpl) GetAppMetricsByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (bool, error) { + + var appMetrics bool + if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { + _, span := otel.Tracer("orchestrator").Start(ctx, "deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId") + deploymentTemplateHistory, err := impl.deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + span.End() + if err != nil { + impl.logger.Errorw("error in getting deployed deployment template history by pipelineId and wfrId", "err", err, "pipelineId", overrideRequest.PipelineId, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + return appMetrics, err + } + appMetrics = deploymentTemplateHistory.IsAppMetricsEnabled + + } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { + _, span := otel.Tracer("orchestrator").Start(ctx, "appLevelMetricsRepository.FindByAppId") + appLevelMetrics, err := impl.appLevelMetricsRepository.FindByAppId(overrideRequest.AppId) + span.End() + if err != nil && !util.IsErrNoRows(err) { + impl.logger.Errorw("error in fetching app level metrics flag", "err", err) + return appMetrics, &util.ApiError{InternalMessage: "unable to fetch app level metrics flag"} + } + appMetrics = appLevelMetrics.AppMetrics + + _, span = otel.Tracer("orchestrator").Start(ctx, "envLevelMetricsRepository.FindByAppIdAndEnvId") + 
envLevelMetrics, err := impl.envLevelMetricsRepository.FindByAppIdAndEnvId(overrideRequest.AppId, overrideRequest.EnvId) + span.End() + if err != nil && !util.IsErrNoRows(err) { + impl.logger.Errorw("err", err) + return appMetrics, &util.ApiError{InternalMessage: "unable to fetch env level metrics flag"} + } + if envLevelMetrics.Id != 0 && envLevelMetrics.AppMetrics != nil { + appMetrics = *envLevelMetrics.AppMetrics + } + } + return appMetrics, nil +} + +func (impl *WorkflowDagExecutorImpl) getDbMigrationOverride(overrideRequest *bean.ValuesOverrideRequest, artifact *repository.CiArtifact, isRollback bool) (overrideJson []byte, err error) { + if isRollback { + return nil, fmt.Errorf("rollback not supported ye") + } + notConfigured := false + config, err := impl.dbMigrationConfigRepository.FindByPipelineId(overrideRequest.PipelineId) + if err != nil && !util.IsErrNoRows(err) { + impl.logger.Errorw("error in fetching pipeline override config", "req", overrideRequest, "err", err) + return nil, err + } else if util.IsErrNoRows(err) { + notConfigured = true + } + envVal := &EnvironmentOverride{} + if notConfigured { + impl.logger.Warnw("no active db migration found", "pipeline", overrideRequest.PipelineId) + envVal.Enabled = false + } else { + materialInfos, err := artifact.ParseMaterialInfo() + if err != nil { + return nil, err + } + + hash, ok := materialInfos[config.GitMaterial.Url] + if !ok { + impl.logger.Errorf("wrong url map ", "map", materialInfos, "url", config.GitMaterial.Url) + return nil, fmt.Errorf("configured url not found in material %s", config.GitMaterial.Url) + } + + envVal.Enabled = true + if config.GitMaterial.GitProvider.AuthMode != repository.AUTH_MODE_USERNAME_PASSWORD && + config.GitMaterial.GitProvider.AuthMode != repository.AUTH_MODE_ACCESS_TOKEN && + config.GitMaterial.GitProvider.AuthMode != repository.AUTH_MODE_ANONYMOUS { + return nil, fmt.Errorf("auth mode %s not supported for migration", config.GitMaterial.GitProvider.AuthMode) + } + 
envVal.appendEnvironmentVariable("GIT_REPO_URL", config.GitMaterial.Url) + envVal.appendEnvironmentVariable("GIT_USER", config.GitMaterial.GitProvider.UserName) + var password string + if config.GitMaterial.GitProvider.AuthMode == repository.AUTH_MODE_USERNAME_PASSWORD { + password = config.GitMaterial.GitProvider.Password + } else { + password = config.GitMaterial.GitProvider.AccessToken + } + envVal.appendEnvironmentVariable("GIT_AUTH_TOKEN", password) + // parse git-tag not required + //envVal.appendEnvironmentVariable("GIT_TAG", "") + envVal.appendEnvironmentVariable("GIT_HASH", hash) + envVal.appendEnvironmentVariable("SCRIPT_LOCATION", config.ScriptSource) + envVal.appendEnvironmentVariable("DB_TYPE", string(config.DbConfig.Type)) + envVal.appendEnvironmentVariable("DB_USER_NAME", config.DbConfig.UserName) + envVal.appendEnvironmentVariable("DB_PASSWORD", config.DbConfig.Password) + envVal.appendEnvironmentVariable("DB_HOST", config.DbConfig.Host) + envVal.appendEnvironmentVariable("DB_PORT", config.DbConfig.Port) + envVal.appendEnvironmentVariable("DB_NAME", config.DbConfig.DbName) + //Will be used for rollback don't delete it + //envVal.appendEnvironmentVariable("MIGRATE_TO_VERSION", strconv.Itoa(overrideRequest.TargetDbVersion)) + } + dbMigrationConfig := map[string]interface{}{"dbMigrationConfig": envVal} + confByte, err := json.Marshal(dbMigrationConfig) + if err != nil { + return nil, err + } + return confByte, nil +} + +func (impl *WorkflowDagExecutorImpl) getConfigMapAndSecretJsonV2(appId int, envId int, pipelineId int, chartVersion string, deploymentWithConfig bean.DeploymentConfigurationType, wfrIdForDeploymentWithSpecificTrigger int) ([]byte, error) { + + var configMapJson string + var secretDataJson string + var configMapJsonApp string + var secretDataJsonApp string + var configMapJsonEnv string + var secretDataJsonEnv string + var err error + //var configMapJsonPipeline string + //var secretDataJsonPipeline string + + merged := []byte("{}") + if 
deploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { + configMapA, err := impl.configMapRepository.GetByAppIdAppLevel(appId) + if err != nil && pg.ErrNoRows != err { + return []byte("{}"), err + } + if configMapA != nil && configMapA.Id > 0 { + configMapJsonApp = configMapA.ConfigMapData + secretDataJsonApp = configMapA.SecretData + } + configMapE, err := impl.configMapRepository.GetByAppIdAndEnvIdEnvLevel(appId, envId) + if err != nil && pg.ErrNoRows != err { + return []byte("{}"), err + } + if configMapE != nil && configMapE.Id > 0 { + configMapJsonEnv = configMapE.ConfigMapData + secretDataJsonEnv = configMapE.SecretData + } + } else if deploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { + //fetching history and setting envLevelConfig and not appLevelConfig because history already contains merged appLevel and envLevel configs + configMapHistory, err := impl.configMapHistoryRepository.GetHistoryByPipelineIdAndWfrId(pipelineId, wfrIdForDeploymentWithSpecificTrigger, repository3.CONFIGMAP_TYPE) + if err != nil { + impl.logger.Errorw("error in getting config map history config by pipelineId and wfrId ", "err", err, "pipelineId", pipelineId, "wfrid", wfrIdForDeploymentWithSpecificTrigger) + return []byte("{}"), err + } + configMapJsonEnv = configMapHistory.Data + secretHistory, err := impl.configMapHistoryRepository.GetHistoryByPipelineIdAndWfrId(pipelineId, wfrIdForDeploymentWithSpecificTrigger, repository3.SECRET_TYPE) + if err != nil { + impl.logger.Errorw("error in getting config map history config by pipelineId and wfrId ", "err", err, "pipelineId", pipelineId, "wfrid", wfrIdForDeploymentWithSpecificTrigger) + return []byte("{}"), err + } + secretDataJsonEnv = secretHistory.Data + } + configMapJson, err = impl.mergeUtil.ConfigMapMerge(configMapJsonApp, configMapJsonEnv) + if err != nil { + return []byte("{}"), err + } + chartMajorVersion, chartMinorVersion, err := util4.ExtractChartVersion(chartVersion) + if err != nil { + 
impl.logger.Errorw("chart version parsing", "err", err) + return []byte("{}"), err + } + secretDataJson, err = impl.mergeUtil.ConfigSecretMerge(secretDataJsonApp, secretDataJsonEnv, chartMajorVersion, chartMinorVersion, false) + if err != nil { + return []byte("{}"), err + } + configResponseR := bean.ConfigMapRootJson{} + configResponse := bean.ConfigMapJson{} + if configMapJson != "" { + err = json.Unmarshal([]byte(configMapJson), &configResponse) + if err != nil { + return []byte("{}"), err + } + } + configResponseR.ConfigMapJson = configResponse + secretResponseR := bean.ConfigSecretRootJson{} + secretResponse := bean.ConfigSecretJson{} + if configMapJson != "" { + err = json.Unmarshal([]byte(secretDataJson), &secretResponse) + if err != nil { + return []byte("{}"), err + } + } + secretResponseR.ConfigSecretJson = secretResponse + + configMapByte, err := json.Marshal(configResponseR) + if err != nil { + return []byte("{}"), err + } + secretDataByte, err := json.Marshal(secretResponseR) + if err != nil { + return []byte("{}"), err + } + + merged, err = impl.mergeUtil.JsonPatch(configMapByte, secretDataByte) + if err != nil { + return []byte("{}"), err + } + return merged, nil +} + +func (impl *WorkflowDagExecutorImpl) savePipelineOverride(overrideRequest *bean.ValuesOverrideRequest, envOverrideId int, triggeredAt time.Time) (override *chartConfig.PipelineOverride, err error) { + currentReleaseNo, err := impl.pipelineOverrideRepository.GetCurrentPipelineReleaseCounter(overrideRequest.PipelineId) + if err != nil { + return nil, err + } + po := &chartConfig.PipelineOverride{ + EnvConfigOverrideId: envOverrideId, + Status: models.CHARTSTATUS_NEW, + PipelineId: overrideRequest.PipelineId, + CiArtifactId: overrideRequest.CiArtifactId, + PipelineReleaseCounter: currentReleaseNo + 1, + CdWorkflowId: overrideRequest.CdWorkflowId, + AuditLog: sql.AuditLog{CreatedBy: overrideRequest.UserId, CreatedOn: triggeredAt, UpdatedOn: triggeredAt, UpdatedBy: overrideRequest.UserId}, 
+ DeploymentType: overrideRequest.DeploymentType, + } + + err = impl.pipelineOverrideRepository.Save(po) + if err != nil { + return nil, err + } + err = impl.checkAndFixDuplicateReleaseNo(po) + if err != nil { + impl.logger.Errorw("error in checking release no duplicacy", "pipeline", po, "err", err) + return nil, err + } + return po, nil +} + +func (impl *WorkflowDagExecutorImpl) getReleaseOverride(envOverride *chartConfig.EnvConfigOverride, overrideRequest *bean.ValuesOverrideRequest, artifact *repository.CiArtifact, pipelineOverride *chartConfig.PipelineOverride, strategy *chartConfig.PipelineStrategy, appMetrics *bool) (releaseOverride string, err error) { + + artifactImage := artifact.Image + imageTag := strings.Split(artifactImage, ":") + + imageTagLen := len(imageTag) + + imageName := "" + + for i := 0; i < imageTagLen-1; i++ { + if i != imageTagLen-2 { + imageName = imageName + imageTag[i] + ":" + } else { + imageName = imageName + imageTag[i] + } + } + + appId := strconv.Itoa(overrideRequest.AppId) + envId := strconv.Itoa(overrideRequest.EnvId) + + deploymentStrategy := "" + if strategy != nil { + deploymentStrategy = string(strategy.Strategy) + } + releaseAttribute := app.ReleaseAttributes{ + Name: imageName, + Tag: imageTag[imageTagLen-1], + PipelineName: overrideRequest.PipelineName, + ReleaseVersion: strconv.Itoa(pipelineOverride.PipelineReleaseCounter), + DeploymentType: deploymentStrategy, + App: appId, + Env: envId, + AppMetrics: appMetrics, + } + override, err := util4.Tprintf(envOverride.Chart.ImageDescriptorTemplate, releaseAttribute) + if err != nil { + return "", &util.ApiError{InternalMessage: "unable to render ImageDescriptorTemplate"} + } + if overrideRequest.AdditionalOverride != nil { + userOverride, err := overrideRequest.AdditionalOverride.MarshalJSON() + if err != nil { + return "", err + } + data, err := impl.mergeUtil.JsonPatch(userOverride, []byte(override)) + if err != nil { + return "", err + } + override = string(data) + } + return 
override, nil +} + +func (impl *WorkflowDagExecutorImpl) mergeAndSave(envOverride *chartConfig.EnvConfigOverride, + overrideRequest *bean.ValuesOverrideRequest, + dbMigrationOverride []byte, + artifact *repository.CiArtifact, + pipeline *pipelineConfig.Pipeline, configMapJson, appLabelJsonByte []byte, strategy *chartConfig.PipelineStrategy, ctx context.Context, + triggeredAt time.Time, deployedBy int32, appMetrics *bool) (releaseId int, overrideId int, mergedValues string, err error) { + + //register release , obtain release id TODO: populate releaseId to template + override, err := impl.savePipelineOverride(overrideRequest, envOverride.Id, triggeredAt) + if err != nil { + return 0, 0, "", err + } + //TODO: check status and apply lock + overrideJson, err := impl.getReleaseOverride(envOverride, overrideRequest, artifact, override, strategy, appMetrics) + if err != nil { + return 0, 0, "", err + } + + //merge three values on the fly + //ordering is important here + //global < environment < db< release + var merged []byte + if !envOverride.IsOverride { + merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.Chart.GlobalOverride)) + if err != nil { + return 0, 0, "", err + } + } else { + merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.EnvOverrideValues)) + if err != nil { + return 0, 0, "", err + } + } + + //pipeline override here comes from pipeline strategy table + if strategy != nil && len(strategy.Config) > 0 { + merged, err = impl.mergeUtil.JsonPatch(merged, []byte(strategy.Config)) + if err != nil { + return 0, 0, "", err + } + } + merged, err = impl.mergeUtil.JsonPatch(merged, dbMigrationOverride) + if err != nil { + return 0, 0, "", err + } + merged, err = impl.mergeUtil.JsonPatch(merged, []byte(overrideJson)) + if err != nil { + return 0, 0, "", err + } + + if configMapJson != nil { + merged, err = impl.mergeUtil.JsonPatch(merged, configMapJson) + if err != nil { + return 0, 0, "", err + } + } + + if appLabelJsonByte 
!= nil { + merged, err = impl.mergeUtil.JsonPatch(merged, appLabelJsonByte) + if err != nil { + return 0, 0, "", err + } + } + + appName := fmt.Sprintf("%s-%s", pipeline.App.AppName, envOverride.Environment.Name) + merged = impl.autoscalingCheckBeforeTrigger(ctx, appName, envOverride.Namespace, merged, overrideRequest) + + _, span := otel.Tracer("orchestrator").Start(ctx, "dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment") + // handle image pull secret if access given + merged, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(envOverride.Environment, pipeline.CiPipelineId, merged) + span.End() + if err != nil { + return 0, 0, "", err + } + + commitHash := "" + commitTime := time.Time{} + if util.IsAcdApp(pipeline.DeploymentAppType) { + chartRepoName := impl.chartTemplateService.GetGitOpsRepoNameFromUrl(envOverride.Chart.GitRepoUrl) + _, span = otel.Tracer("orchestrator").Start(ctx, "chartTemplateService.GetUserEmailIdAndNameForGitOpsCommit") + //getting username & emailId for commit author data + userEmailId, userName := impl.chartTemplateService.GetUserEmailIdAndNameForGitOpsCommit(overrideRequest.UserId) + span.End() + chartGitAttr := &util.ChartConfig{ + FileName: fmt.Sprintf("_%d-values.yaml", envOverride.TargetEnvironment), + FileContent: string(merged), + ChartName: envOverride.Chart.ChartName, + ChartLocation: envOverride.Chart.ChartLocation, + ChartRepoName: chartRepoName, + ReleaseMessage: fmt.Sprintf("release-%d-env-%d ", override.Id, envOverride.TargetEnvironment), + UserName: userName, + UserEmailId: userEmailId, + } + gitOpsConfigBitbucket, err := impl.gitOpsConfigRepository.GetGitOpsConfigByProvider(util.BITBUCKET_PROVIDER) + if err != nil { + if err == pg.ErrNoRows { + gitOpsConfigBitbucket.BitBucketWorkspaceId = "" + } else { + return 0, 0, "", err + } + } + gitOpsConfig := &bean.GitOpsConfigDto{BitBucketWorkspaceId: gitOpsConfigBitbucket.BitBucketWorkspaceId} + _, span = 
otel.Tracer("orchestrator").Start(ctx, "gitFactory.Client.CommitValues") + commitHash, commitTime, err = impl.gitFactory.Client.CommitValues(chartGitAttr, gitOpsConfig) + span.End() + if err != nil { + impl.logger.Errorw("error in git commit", "err", err) + return 0, 0, "", err + } + } + if commitTime.IsZero() { + commitTime = time.Now() + } + pipelineOverride := &chartConfig.PipelineOverride{ + Id: override.Id, + GitHash: commitHash, + CommitTime: commitTime, + EnvConfigOverrideId: envOverride.Id, + PipelineOverrideValues: overrideJson, + PipelineId: overrideRequest.PipelineId, + CiArtifactId: overrideRequest.CiArtifactId, + PipelineMergedValues: string(merged), + AuditLog: sql.AuditLog{UpdatedOn: triggeredAt, UpdatedBy: deployedBy}, + } + _, span = otel.Tracer("orchestrator").Start(ctx, "pipelineOverrideRepository.Update") + err = impl.pipelineOverrideRepository.Update(pipelineOverride) + span.End() + if err != nil { + return 0, 0, "", err + } + mergedValues = string(merged) + return override.PipelineReleaseCounter, override.Id, mergedValues, nil +} + +func (impl *WorkflowDagExecutorImpl) mergeOverrideValues(envOverride *chartConfig.EnvConfigOverride, + dbMigrationOverride []byte, + releaseOverrideJson string, + configMapJson []byte, + appLabelJsonByte []byte, + strategy *chartConfig.PipelineStrategy, +) (mergedValues []byte, err error) { + + //merge three values on the fly + //ordering is important here + //global < environment < db< release + var merged []byte + if !envOverride.IsOverride { + merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.Chart.ResolvedGlobalOverride)) + if err != nil { + return nil, err + } + } else { + merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.ResolvedEnvOverrideValues)) + if err != nil { + return nil, err + } + } + if strategy != nil && len(strategy.Config) > 0 { + merged, err = impl.mergeUtil.JsonPatch(merged, []byte(strategy.Config)) + if err != nil { + return nil, err + } + } + 
merged, err = impl.mergeUtil.JsonPatch(merged, dbMigrationOverride) + if err != nil { + return nil, err + } + merged, err = impl.mergeUtil.JsonPatch(merged, []byte(releaseOverrideJson)) + if err != nil { + return nil, err + } + if configMapJson != nil { + merged, err = impl.mergeUtil.JsonPatch(merged, configMapJson) + if err != nil { + return nil, err + } + } + if appLabelJsonByte != nil { + merged, err = impl.mergeUtil.JsonPatch(merged, appLabelJsonByte) + if err != nil { + return nil, err + } + } + return merged, nil +} + +func (impl *WorkflowDagExecutorImpl) autoscalingCheckBeforeTrigger(ctx context.Context, appName string, namespace string, merged []byte, overrideRequest *bean.ValuesOverrideRequest) []byte { + //pipeline := overrideRequest.Pipeline + var appId = overrideRequest.AppId + pipelineId := overrideRequest.PipelineId + var appDeploymentType = overrideRequest.DeploymentAppType + var clusterId = overrideRequest.ClusterId + deploymentType := overrideRequest.DeploymentType + templateMap := make(map[string]interface{}) + err := json.Unmarshal(merged, &templateMap) + if err != nil { + return merged + } + + hpaResourceRequest := impl.getAutoScalingReplicaCount(templateMap, appName) + impl.logger.Debugw("autoscalingCheckBeforeTrigger", "hpaResourceRequest", hpaResourceRequest) + if hpaResourceRequest.IsEnable { + resourceManifest := make(map[string]interface{}) + if util.IsAcdApp(appDeploymentType) { + query := &application.ApplicationResourceRequest{ + Name: &appName, + Version: &hpaResourceRequest.Version, + Group: &hpaResourceRequest.Group, + Kind: &hpaResourceRequest.Kind, + ResourceName: &hpaResourceRequest.ResourceName, + Namespace: &namespace, + } + recv, err := impl.acdClient.GetResource(ctx, query) + impl.logger.Debugw("resource manifest get replica count", "response", recv) + if err != nil { + impl.logger.Errorw("ACD Get Resource API Failed", "err", err) + middleware.AcdGetResourceCounter.WithLabelValues(strconv.Itoa(appId), namespace, appName).Inc() 
+ return merged + } + if recv != nil && len(*recv.Manifest) > 0 { + err := json.Unmarshal([]byte(*recv.Manifest), &resourceManifest) + if err != nil { + impl.logger.Errorw("unmarshal failed for hpa check", "err", err) + return merged + } + } + } else { + version := "v2beta2" + k8sResource, err := impl.k8sCommonService.GetResource(ctx, &k8s.ResourceRequestBean{ClusterId: clusterId, + K8sRequest: &util5.K8sRequestBean{ResourceIdentifier: util5.ResourceIdentifier{Name: hpaResourceRequest.ResourceName, + Namespace: namespace, GroupVersionKind: schema.GroupVersionKind{Group: hpaResourceRequest.Group, Kind: hpaResourceRequest.Kind, Version: version}}}}) + if err != nil { + impl.logger.Errorw("error occurred while fetching resource for app", "resourceName", hpaResourceRequest.ResourceName, "err", err) + return merged + } + resourceManifest = k8sResource.Manifest.Object + } + if len(resourceManifest) > 0 { + statusMap := resourceManifest["status"].(map[string]interface{}) + currentReplicaVal := statusMap["currentReplicas"] + currentReplicaCount, err := util4.ParseFloatNumber(currentReplicaVal) + if err != nil { + impl.logger.Errorw("error occurred while parsing replica count", "currentReplicas", currentReplicaVal, "err", err) + return merged + } + + reqReplicaCount := impl.fetchRequiredReplicaCount(currentReplicaCount, hpaResourceRequest.ReqMaxReplicas, hpaResourceRequest.ReqMinReplicas) + templateMap["replicaCount"] = reqReplicaCount + merged, err = json.Marshal(&templateMap) + if err != nil { + impl.logger.Errorw("marshaling failed for hpa check", "err", err) + return merged + } + } + } else { + impl.logger.Errorw("autoscaling is not enabled", "pipelineId", pipelineId) + } + + //check for custom chart support + if autoscalingEnabledPath, ok := templateMap[bean2.CustomAutoScalingEnabledPathKey]; ok { + if deploymentType == models.DEPLOYMENTTYPE_STOP { + merged, err = impl.setScalingValues(templateMap, bean2.CustomAutoScalingEnabledPathKey, merged, false) + if err != nil { 
+ return merged + } + merged, err = impl.setScalingValues(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged, 0) + if err != nil { + return merged + } + } else { + autoscalingEnabled := false + autoscalingEnabledValue := gjson.Get(string(merged), autoscalingEnabledPath.(string)).Value() + if val, ok := autoscalingEnabledValue.(bool); ok { + autoscalingEnabled = val + } + if autoscalingEnabled { + // extract replica count, min, max and check for required value + replicaCount, err := impl.getReplicaCountFromCustomChart(templateMap, merged) + if err != nil { + return merged + } + merged, err = impl.setScalingValues(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged, replicaCount) + if err != nil { + return merged + } + } + } + } + + return merged +} + +func (impl *WorkflowDagExecutorImpl) updateArgoPipeline(appId int, pipelineName string, envOverride *chartConfig.EnvConfigOverride, ctx context.Context) (bool, error) { + //repo has been registered while helm create + if ctx == nil { + impl.logger.Errorw("err in syncing ACD, ctx is NULL", "pipelineName", pipelineName) + return false, nil + } + app, err := impl.appRepository.FindById(appId) + if err != nil { + impl.logger.Errorw("no app found ", "err", err) + return false, err + } + envModel, err := impl.envRepository.FindById(envOverride.TargetEnvironment) + if err != nil { + return false, err + } + argoAppName := fmt.Sprintf("%s-%s", app.AppName, envModel.Name) + impl.logger.Infow("received payload, updateArgoPipeline", "appId", appId, "pipelineName", pipelineName, "envId", envOverride.TargetEnvironment, "argoAppName", argoAppName, "context", ctx) + application3, err := impl.acdClient.Get(ctx, &application.ApplicationQuery{Name: &argoAppName}) + if err != nil { + impl.logger.Errorw("no argo app exists", "app", argoAppName, "pipeline", pipelineName) + return false, err + } + //if status, ok:=status.FromError(err);ok{ + appStatus, _ := status2.FromError(err) + + if appStatus.Code() == codes.OK { 
+ impl.logger.Debugw("argo app exists", "app", argoAppName, "pipeline", pipelineName) + if application3.Spec.Source.Path != envOverride.Chart.ChartLocation || application3.Spec.Source.TargetRevision != "master" { + patchReq := v1alpha1.Application{Spec: v1alpha1.ApplicationSpec{Source: v1alpha1.ApplicationSource{Path: envOverride.Chart.ChartLocation, RepoURL: envOverride.Chart.GitRepoUrl, TargetRevision: "master"}}} + reqbyte, err := json.Marshal(patchReq) + if err != nil { + impl.logger.Errorw("error in creating patch", "err", err) + } + reqString := string(reqbyte) + patchType := "merge" + _, err = impl.acdClient.Patch(ctx, &application.ApplicationPatchRequest{Patch: &reqString, Name: &argoAppName, PatchType: &patchType}) + if err != nil { + impl.logger.Errorw("error in creating argo pipeline ", "name", pipelineName, "patch", string(reqbyte), "err", err) + return false, err + } + impl.logger.Debugw("pipeline update req ", "res", patchReq) + } else { + impl.logger.Debug("pipeline no need to update ") + } + // Doing normal refresh to avoid the sync delay in argo-cd. 
+ err2 := impl.argoClientWrapperService.GetArgoAppWithNormalRefresh(ctx, argoAppName) + if err2 != nil { + impl.logger.Errorw("error in getting argo application with normal refresh", "argoAppName", argoAppName, "pipelineName", pipelineName) + } + return true, nil + } else if appStatus.Code() == codes.NotFound { + impl.logger.Errorw("argo app not found", "app", argoAppName, "pipeline", pipelineName) + return false, nil + } else { + impl.logger.Errorw("err in checking application on gocd", "err", err, "pipeline", pipelineName) + return false, err + } +} + +func (impl *WorkflowDagExecutorImpl) getValuesFileForEnv(environmentId int) string { + return fmt.Sprintf("_%d-values.yaml", environmentId) //-{envId}-values.yaml +} + +func (impl *WorkflowDagExecutorImpl) updatePipeline(pipeline *pipelineConfig.Pipeline, userId int32) (bool, error) { + err := impl.pipelineRepository.SetDeploymentAppCreatedInPipeline(true, pipeline.Id, userId) + if err != nil { + impl.logger.Errorw("error on updating cd pipeline for setting deployment app created", "err", err) + return false, err + } + return true, nil +} + +// helmInstallReleaseWithCustomChart performs helm install with custom chart +func (impl *WorkflowDagExecutorImpl) helmInstallReleaseWithCustomChart(ctx context.Context, releaseIdentifier *client2.ReleaseIdentifier, referenceChartByte []byte, valuesYaml string) (*client2.HelmInstallCustomResponse, error) { + + helmInstallRequest := client2.HelmInstallCustomRequest{ + ValuesYaml: valuesYaml, + ChartContent: &client2.ChartContent{Content: referenceChartByte}, + ReleaseIdentifier: releaseIdentifier, + } + + // Request exec + return impl.helmAppClient.InstallReleaseWithCustomChart(ctx, &helmInstallRequest) +} + +func (impl *WorkflowDagExecutorImpl) getResolvedTemplateWithSnapshot(deploymentTemplateHistoryId int, template string) (string, map[string]string, error) { + + variableSnapshotMap := make(map[string]string) + reference := repository5.HistoryReference{ + HistoryReferenceId: 
deploymentTemplateHistoryId, + HistoryReferenceType: repository5.HistoryReferenceTypeDeploymentTemplate, + } + variableSnapshot, err := impl.variableSnapshotHistoryService.GetVariableHistoryForReferences([]repository5.HistoryReference{reference}) + if err != nil { + return template, variableSnapshotMap, err + } + + if _, ok := variableSnapshot[reference]; !ok { + return template, variableSnapshotMap, nil + } + + err = json.Unmarshal(variableSnapshot[reference].VariableSnapshot, &variableSnapshotMap) + if err != nil { + return template, variableSnapshotMap, err + } + + if len(variableSnapshotMap) == 0 { + return template, variableSnapshotMap, nil + } + scopedVariableData := parsers.GetScopedVarData(variableSnapshotMap, make(map[string]bool), true) + request := parsers.VariableParserRequest{Template: template, TemplateType: parsers.JsonVariableTemplate, Variables: scopedVariableData} + parserResponse := impl.variableTemplateParser.ParseTemplate(request) + err = parserResponse.Error + if err != nil { + return template, variableSnapshotMap, err + } + resolvedTemplate := parserResponse.ResolvedTemplate + return resolvedTemplate, variableSnapshotMap, nil +} + +func (impl *WorkflowDagExecutorImpl) extractVariablesAndResolveTemplate(scope resourceQualifiers.Scope, template string, entity repository5.Entity) (string, map[string]string, error) { + + variableMap := make(map[string]string) + entityToVariables, err := impl.variableEntityMappingService.GetAllMappingsForEntities([]repository5.Entity{entity}) + if err != nil { + return template, variableMap, err + } + + if vars, ok := entityToVariables[entity]; !ok || len(vars) == 0 { + return template, variableMap, nil + } + + // pre-populating variable map with variable so that the variables which don't have any resolved data + // is saved in snapshot + for _, variable := range entityToVariables[entity] { + variableMap[variable] = impl.scopedVariableService.GetFormattedVariableForName(variable) + } + + scopedVariables, err := 
impl.scopedVariableService.GetScopedVariables(scope, entityToVariables[entity], true) + if err != nil { + return template, variableMap, err + } + + for _, variable := range scopedVariables { + variableMap[variable.VariableName] = variable.VariableValue.StringValue() + } + + parserRequest := parsers.VariableParserRequest{Template: template, Variables: scopedVariables, TemplateType: parsers.JsonVariableTemplate} + parserResponse := impl.variableTemplateParser.ParseTemplate(parserRequest) + err = parserResponse.Error + if err != nil { + return template, variableMap, err + } + + resolvedTemplate := parserResponse.ResolvedTemplate + return resolvedTemplate, variableMap, nil +} + +type EnvironmentOverride struct { + Enabled bool `json:"enabled"` + EnvValues []*KeyValue `json:"envValues"` +} + +type KeyValue struct { + Key string `json:"key"` + Value string `json:"value"` +} + +func (conf *EnvironmentOverride) appendEnvironmentVariable(key, value string) { + item := &KeyValue{Key: key, Value: value} + conf.EnvValues = append(conf.EnvValues, item) +} + +func (impl *WorkflowDagExecutorImpl) checkAndFixDuplicateReleaseNo(override *chartConfig.PipelineOverride) error { + + uniqueVerified := false + retryCount := 0 + + for !uniqueVerified && retryCount < 5 { + retryCount = retryCount + 1 + overrides, err := impl.pipelineOverrideRepository.GetByPipelineIdAndReleaseNo(override.PipelineId, override.PipelineReleaseCounter) + if err != nil { + return err + } + if overrides[0].Id == override.Id { + uniqueVerified = true + } else { + //duplicate might be due to concurrency, lets fix it + currentReleaseNo, err := impl.pipelineOverrideRepository.GetCurrentPipelineReleaseCounter(override.PipelineId) + if err != nil { + return err + } + override.PipelineReleaseCounter = currentReleaseNo + 1 + err = impl.pipelineOverrideRepository.Save(override) + if err != nil { + return err + } + } + } + if !uniqueVerified { + return fmt.Errorf("duplicate verification retry count exide max overrideId: 
%d ,count: %d", override.Id, retryCount) + } + return nil +} + +func (impl *WorkflowDagExecutorImpl) getAutoScalingReplicaCount(templateMap map[string]interface{}, appName string) *util4.HpaResourceRequest { + hasOverride := false + if _, ok := templateMap[fullnameOverride]; ok { + appNameOverride := templateMap[fullnameOverride].(string) + if len(appNameOverride) > 0 { + appName = appNameOverride + hasOverride = true + } + } + if !hasOverride { + if _, ok := templateMap[nameOverride]; ok { + nameOverride := templateMap[nameOverride].(string) + if len(nameOverride) > 0 { + appName = fmt.Sprintf("%s-%s", appName, nameOverride) + } + } + } + hpaResourceRequest := &util4.HpaResourceRequest{} + hpaResourceRequest.Version = "" + hpaResourceRequest.Group = autoscaling.ServiceName + hpaResourceRequest.Kind = horizontalPodAutoscaler + impl.logger.Infow("getAutoScalingReplicaCount", "hpaResourceRequest", hpaResourceRequest) + if _, ok := templateMap[kedaAutoscaling]; ok { + as := templateMap[kedaAutoscaling] + asd := as.(map[string]interface{}) + if _, ok := asd[enabled]; ok { + impl.logger.Infow("getAutoScalingReplicaCount", "hpaResourceRequest", hpaResourceRequest) + enable := asd[enabled].(bool) + if enable { + hpaResourceRequest.IsEnable = enable + hpaResourceRequest.ReqReplicaCount = templateMap[replicaCount].(float64) + hpaResourceRequest.ReqMaxReplicas = asd["maxReplicaCount"].(float64) + hpaResourceRequest.ReqMinReplicas = asd["minReplicaCount"].(float64) + hpaResourceRequest.ResourceName = fmt.Sprintf("%s-%s-%s", "keda-hpa", appName, "keda") + impl.logger.Infow("getAutoScalingReplicaCount", "hpaResourceRequest", hpaResourceRequest) + return hpaResourceRequest + } + } + } + + if _, ok := templateMap[autoscaling.ServiceName]; ok { + as := templateMap[autoscaling.ServiceName] + asd := as.(map[string]interface{}) + if _, ok := asd[enabled]; ok { + enable := asd[enabled].(bool) + if enable { + hpaResourceRequest.IsEnable = asd[enabled].(bool) + 
hpaResourceRequest.ReqReplicaCount = templateMap[replicaCount].(float64) + hpaResourceRequest.ReqMaxReplicas = asd["MaxReplicas"].(float64) + hpaResourceRequest.ReqMinReplicas = asd["MinReplicas"].(float64) + hpaResourceRequest.ResourceName = fmt.Sprintf("%s-%s", appName, "hpa") + return hpaResourceRequest + } + } + } + return hpaResourceRequest + +} + +func (impl *WorkflowDagExecutorImpl) fetchRequiredReplicaCount(currentReplicaCount float64, reqMaxReplicas float64, reqMinReplicas float64) float64 { + var reqReplicaCount float64 + if currentReplicaCount <= reqMaxReplicas && currentReplicaCount >= reqMinReplicas { + reqReplicaCount = currentReplicaCount + } else if currentReplicaCount > reqMaxReplicas { + reqReplicaCount = reqMaxReplicas + } else if currentReplicaCount < reqMinReplicas { + reqReplicaCount = reqMinReplicas + } + return reqReplicaCount +} + +func (impl *WorkflowDagExecutorImpl) getReplicaCountFromCustomChart(templateMap map[string]interface{}, merged []byte) (float64, error) { + autoscalingMinVal, err := impl.extractParamValue(templateMap, bean2.CustomAutoscalingMinPathKey, merged) + if err != nil { + return 0, err + } + autoscalingMaxVal, err := impl.extractParamValue(templateMap, bean2.CustomAutoscalingMaxPathKey, merged) + if err != nil { + return 0, err + } + autoscalingReplicaCountVal, err := impl.extractParamValue(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged) + if err != nil { + return 0, err + } + return impl.fetchRequiredReplicaCount(autoscalingReplicaCountVal, autoscalingMaxVal, autoscalingMinVal), nil +} + +func (impl *WorkflowDagExecutorImpl) setScalingValues(templateMap map[string]interface{}, customScalingKey string, merged []byte, value interface{}) ([]byte, error) { + autoscalingJsonPath := templateMap[customScalingKey] + autoscalingJsonPathKey := autoscalingJsonPath.(string) + mergedRes, err := sjson.Set(string(merged), autoscalingJsonPathKey, value) + if err != nil { + impl.logger.Errorw("error occurred while 
setting autoscaling key", "JsonPathKey", autoscalingJsonPathKey, "err", err) + return []byte{}, err + } + return []byte(mergedRes), nil +} + +func (impl *WorkflowDagExecutorImpl) extractParamValue(inputMap map[string]interface{}, key string, merged []byte) (float64, error) { + if _, ok := inputMap[key]; !ok { + return 0, errors.New("empty-val-err") + } + floatNumber, err := util4.ParseFloatNumber(gjson.Get(string(merged), inputMap[key].(string)).Value()) + if err != nil { + impl.logger.Errorw("error occurred while parsing float number", "key", key, "err", err) + } + return floatNumber, err +} diff --git a/pkg/pipeline/WorkflowService.go b/pkg/pipeline/WorkflowService.go index 223c52ecb8..5ced308973 100644 --- a/pkg/pipeline/WorkflowService.go +++ b/pkg/pipeline/WorkflowService.go @@ -19,6 +19,7 @@ package pipeline import ( "context" + "encoding/json" "errors" "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" v1alpha12 "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1" @@ -137,10 +138,21 @@ func (impl *WorkflowServiceImpl) createWorkflowTemplate(workflowRequest *Workflo workflowTemplate.Volumes = ExtractVolumesFromCmCs(workflowConfigMaps, workflowSecrets) workflowRequest.AddNodeConstraintsFromConfig(&workflowTemplate, impl.ciCdConfig) - workflowMainContainer := workflowRequest.GetWorkflowMainContainer(impl.ciCdConfig, workflowJson, workflowTemplate, workflowConfigMaps, workflowSecrets) + workflowMainContainer, err := workflowRequest.GetWorkflowMainContainer(impl.ciCdConfig, workflowJson, &workflowTemplate, workflowConfigMaps, workflowSecrets) + + if err != nil { + impl.Logger.Errorw("error occurred while getting workflow main container", "err", err) + return bean3.WorkflowTemplate{}, err + } + workflowTemplate.Containers = []v12.Container{workflowMainContainer} impl.updateBlobStorageConfig(workflowRequest, &workflowTemplate) - + if workflowRequest.Type == 
bean3.CI_WORKFLOW_PIPELINE_TYPE || workflowRequest.Type == bean3.JOB_WORKFLOW_PIPELINE_TYPE { + nodeSelector := impl.getAppLabelNodeSelector(workflowRequest) + if nodeSelector != nil { + workflowTemplate.NodeSelector = nodeSelector + } + } if workflowRequest.Type == bean3.CD_WORKFLOW_PIPELINE_TYPE { workflowTemplate.WfControllerInstanceID = impl.ciCdConfig.WfControllerInstanceID workflowTemplate.TerminationGracePeriod = impl.ciCdConfig.TerminationGracePeriod @@ -242,6 +254,21 @@ func (impl *WorkflowServiceImpl) updateBlobStorageConfig(workflowRequest *Workfl workflowTemplate.CloudStorageKey = workflowRequest.BlobStorageLogsKey } +func (impl *WorkflowServiceImpl) getAppLabelNodeSelector(workflowRequest *WorkflowRequest) map[string]string { + // node selector + if val, ok := workflowRequest.AppLabels[CI_NODE_SELECTOR_APP_LABEL_KEY]; ok && !(workflowRequest.CheckForJob() && workflowRequest.IsExtRun) { + var nodeSelectors map[string]string + // Unmarshal or Decode the JSON to the interface. 
+ err := json.Unmarshal([]byte(val), &nodeSelectors) + if err != nil { + impl.Logger.Errorw("err in unmarshalling nodeSelectors", "err", err, "val", val) + return nil + } + return nodeSelectors + } + return nil +} + func (impl *WorkflowServiceImpl) getWorkflowExecutor(executorType pipelineConfig.WorkflowExecutorType) WorkflowExecutor { if executorType == pipelineConfig.WORKFLOW_EXECUTOR_TYPE_AWF { return impl.argoWorkflowExecutor @@ -268,6 +295,9 @@ func (impl *WorkflowServiceImpl) TerminateWorkflow(executorType pipelineConfig.W var err error if executorType != "" { workflowExecutor := impl.getWorkflowExecutor(executorType) + if restConfig == nil { + restConfig = impl.config + } err = workflowExecutor.TerminateWorkflow(name, namespace, restConfig) } else { wfClient, err := impl.getWfClient(environment, namespace, isExt) diff --git a/pkg/pipeline/WorkflowUtils.go b/pkg/pipeline/WorkflowUtils.go index e08da6989e..b8317a2f98 100644 --- a/pkg/pipeline/WorkflowUtils.go +++ b/pkg/pipeline/WorkflowUtils.go @@ -694,7 +694,7 @@ func (workflowRequest *WorkflowRequest) getWorkflowImage() string { return "" } } -func (workflowRequest *WorkflowRequest) GetWorkflowMainContainer(config *CiCdConfig, workflowJson []byte, workflowTemplate bean.WorkflowTemplate, workflowConfigMaps []bean2.ConfigSecretMap, workflowSecrets []bean2.ConfigSecretMap) v12.Container { +func (workflowRequest *WorkflowRequest) GetWorkflowMainContainer(config *CiCdConfig, workflowJson []byte, workflowTemplate *bean.WorkflowTemplate, workflowConfigMaps []bean2.ConfigSecretMap, workflowSecrets []bean2.ConfigSecretMap) (v12.Container, error) { privileged := true pvc := workflowRequest.getPVCForWorkflowRequest() containerEnvVariables := workflowRequest.getContainerEnvVariables(config, workflowJson) @@ -714,7 +714,12 @@ func (workflowRequest *WorkflowRequest) GetWorkflowMainContainer(config *CiCdCon Name: "app-data", ContainerPort: 9102, }} + err := updateVolumeMountsForCi(config, workflowTemplate, 
&workflowMainContainer) + if err != nil { + return workflowMainContainer, err + } } + if len(pvc) != 0 { buildPvcCachePath := config.BuildPvcCachePath buildxPvcCachePath := config.BuildxPvcCachePath @@ -744,7 +749,7 @@ func (workflowRequest *WorkflowRequest) GetWorkflowMainContainer(config *CiCdCon }) } UpdateContainerEnvsFromCmCs(&workflowMainContainer, workflowConfigMaps, workflowSecrets) - return workflowMainContainer + return workflowMainContainer, nil } func CheckIfReTriggerRequired(status, message, workflowRunnerStatus string) bool { @@ -752,3 +757,13 @@ func CheckIfReTriggerRequired(status, message, workflowRunnerStatus string) bool message == POD_DELETED_MESSAGE) && workflowRunnerStatus != WorkflowCancel } + +func updateVolumeMountsForCi(config *CiCdConfig, workflowTemplate *bean.WorkflowTemplate, workflowMainContainer *v12.Container) error { + volume, volumeMounts, err := config.GetWorkflowVolumeAndVolumeMounts() + if err != nil { + return err + } + workflowTemplate.Volumes = volume + workflowMainContainer.VolumeMounts = volumeMounts + return nil +} diff --git a/pkg/pipeline/bean/CustomTagService.go b/pkg/pipeline/bean/CustomTagService.go new file mode 100644 index 0000000000..b823de3aed --- /dev/null +++ b/pkg/pipeline/bean/CustomTagService.go @@ -0,0 +1,25 @@ +package bean + +import "fmt" + +const ( + EntityNull = iota + EntityTypeCiPipelineId +) + +const ( + ImagePathPattern = "%s/%s:%s" // dockerReg/dockerRepo:Tag + ImageTagUnavailableMessage = "Desired image tag already exists" + REGEX_PATTERN_FOR_ENSURING_ONLY_ONE_VARIABLE_BETWEEN_BRACKETS = `\{.{2,}\}` + REGEX_PATTERN_FOR_CHARACTER_OTHER_THEN_X_OR_x = `\{[^xX]|{}\}` + REGEX_PATTERN_FOR_IMAGE_TAG = `^[a-zA-Z0-9]+[a-zA-Z0-9._-]*$` +) + +var ( + ErrImagePathInUse = fmt.Errorf(ImageTagUnavailableMessage) +) + +const ( + IMAGE_TAG_VARIABLE_NAME_X = "{X}" + IMAGE_TAG_VARIABLE_NAME_x = "{x}" +) diff --git a/pkg/pipeline/history/DeployedConfigurationHistoryService.go 
b/pkg/pipeline/history/DeployedConfigurationHistoryService.go index b7c2565d06..7e35bc4a22 100644 --- a/pkg/pipeline/history/DeployedConfigurationHistoryService.go +++ b/pkg/pipeline/history/DeployedConfigurationHistoryService.go @@ -1,6 +1,7 @@ package history import ( + "context" "errors" "fmt" "github.com/devtron-labs/devtron/api/bean" @@ -14,9 +15,9 @@ import ( type DeployedConfigurationHistoryService interface { GetDeployedConfigurationByWfrId(pipelineId, wfrId int) ([]*DeploymentConfigurationDto, error) GetDeployedHistoryComponentList(pipelineId, baseConfigId int, historyComponent, historyComponentName string) ([]*DeployedHistoryComponentMetadataDto, error) - GetDeployedHistoryComponentDetail(pipelineId, id int, historyComponent, historyComponentName string, userHasAdminAccess bool) (*HistoryDetailDto, error) - GetAllDeployedConfigurationByPipelineIdAndLatestWfrId(pipelineId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) - GetAllDeployedConfigurationByPipelineIdAndWfrId(pipelineId, wfrId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) + GetDeployedHistoryComponentDetail(ctx context.Context, pipelineId, id int, historyComponent, historyComponentName string, userHasAdminAccess bool) (*HistoryDetailDto, error) + GetAllDeployedConfigurationByPipelineIdAndLatestWfrId(ctx context.Context, pipelineId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) + GetAllDeployedConfigurationByPipelineIdAndWfrId(ctx context.Context, pipelineId, wfrId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) } type DeployedConfigurationHistoryServiceImpl struct { @@ -125,11 +126,11 @@ func (impl *DeployedConfigurationHistoryServiceImpl) GetDeployedHistoryComponent return historyList, nil } -func (impl *DeployedConfigurationHistoryServiceImpl) GetDeployedHistoryComponentDetail(pipelineId, id int, historyComponent, historyComponentName string, userHasAdminAccess bool) 
(*HistoryDetailDto, error) { +func (impl *DeployedConfigurationHistoryServiceImpl) GetDeployedHistoryComponentDetail(ctx context.Context, pipelineId, id int, historyComponent, historyComponentName string, userHasAdminAccess bool) (*HistoryDetailDto, error) { history := &HistoryDetailDto{} var err error if historyComponent == string(DEPLOYMENT_TEMPLATE_TYPE_HISTORY_COMPONENT) { - history, err = impl.deploymentTemplateHistoryService.GetHistoryForDeployedTemplateById(id, pipelineId) + history, err = impl.deploymentTemplateHistoryService.GetHistoryForDeployedTemplateById(ctx, id, pipelineId) } else if historyComponent == string(PIPELINE_STRATEGY_TYPE_HISTORY_COMPONENT) { history, err = impl.strategyHistoryService.GetHistoryForDeployedStrategyById(id, pipelineId) } else if historyComponent == string(CONFIGMAP_TYPE_HISTORY_COMPONENT) { @@ -146,14 +147,14 @@ func (impl *DeployedConfigurationHistoryServiceImpl) GetDeployedHistoryComponent return history, nil } -func (impl *DeployedConfigurationHistoryServiceImpl) GetAllDeployedConfigurationByPipelineIdAndLatestWfrId(pipelineId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) { +func (impl *DeployedConfigurationHistoryServiceImpl) GetAllDeployedConfigurationByPipelineIdAndLatestWfrId(ctx context.Context, pipelineId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) { //getting latest wfr from pipelineId wfr, err := impl.cdWorkflowRepository.FindLastStatusByPipelineIdAndRunnerType(pipelineId, bean.CD_WORKFLOW_TYPE_DEPLOY) if err != nil { impl.logger.Errorw("error in getting latest deploy stage wfr by pipelineId", "err", err, "pipelineId", pipelineId) return nil, err } - deployedConfig, err := impl.GetAllDeployedConfigurationByPipelineIdAndWfrId(pipelineId, wfr.Id, userHasAdminAccess) + deployedConfig, err := impl.GetAllDeployedConfigurationByPipelineIdAndWfrId(ctx, pipelineId, wfr.Id, userHasAdminAccess) if err != nil { impl.logger.Errorw("error in getting 
GetAllDeployedConfigurationByPipelineIdAndWfrId", "err", err, "pipelineID", pipelineId, "wfrId", wfr.Id) return nil, err @@ -161,9 +162,9 @@ func (impl *DeployedConfigurationHistoryServiceImpl) GetAllDeployedConfiguration deployedConfig.WfrId = wfr.Id return deployedConfig, nil } -func (impl *DeployedConfigurationHistoryServiceImpl) GetAllDeployedConfigurationByPipelineIdAndWfrId(pipelineId, wfrId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) { +func (impl *DeployedConfigurationHistoryServiceImpl) GetAllDeployedConfigurationByPipelineIdAndWfrId(ctx context.Context, pipelineId, wfrId int, userHasAdminAccess bool) (*AllDeploymentConfigurationDetail, error) { //getting history of deployment template for latest deployment - deploymentTemplateHistory, err := impl.deploymentTemplateHistoryService.GetDeployedHistoryByPipelineIdAndWfrId(pipelineId, wfrId) + deploymentTemplateHistory, err := impl.deploymentTemplateHistoryService.GetDeployedHistoryByPipelineIdAndWfrId(ctx, pipelineId, wfrId) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting deployment template history by pipelineId and wfrId", "err", err, "pipelineId", pipelineId, "wfrId", wfrId) return nil, err diff --git a/pkg/pipeline/history/DeploymentTemplateHistoryService.go b/pkg/pipeline/history/DeploymentTemplateHistoryService.go index 7544df13b2..4dab5399be 100644 --- a/pkg/pipeline/history/DeploymentTemplateHistoryService.go +++ b/pkg/pipeline/history/DeploymentTemplateHistoryService.go @@ -1,6 +1,7 @@ package history import ( + "context" "encoding/json" repository2 "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" @@ -12,6 +13,7 @@ import ( "github.com/devtron-labs/devtron/pkg/variables" "github.com/devtron-labs/devtron/pkg/variables/parsers" repository6 
"github.com/devtron-labs/devtron/pkg/variables/repository" + "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" "go.uber.org/zap" "time" @@ -23,12 +25,12 @@ type DeploymentTemplateHistoryService interface { CreateDeploymentTemplateHistoryForDeploymentTrigger(pipeline *pipelineConfig.Pipeline, envOverride *chartConfig.EnvConfigOverride, renderedImageTemplate string, deployedOn time.Time, deployedBy int32) (*repository.DeploymentTemplateHistory, error) GetDeploymentDetailsForDeployedTemplateHistory(pipelineId, offset, limit int) ([]*DeploymentTemplateHistoryDto, error) - GetHistoryForDeployedTemplateById(id, pipelineId int) (*HistoryDetailDto, error) + GetHistoryForDeployedTemplateById(ctx context.Context, id int, pipelineId int) (*HistoryDetailDto, error) CheckIfHistoryExistsForPipelineIdAndWfrId(pipelineId, wfrId int) (historyId int, exists bool, err error) GetDeployedHistoryList(pipelineId, baseConfigId int) ([]*DeployedHistoryComponentMetadataDto, error) // used for rollback - GetDeployedHistoryByPipelineIdAndWfrId(pipelineId, wfrId int) (*HistoryDetailDto, error) + GetDeployedHistoryByPipelineIdAndWfrId(ctx context.Context, pipelineId, wfrId int) (*HistoryDetailDto, error) } type DeploymentTemplateHistoryServiceImpl struct { @@ -43,6 +45,7 @@ type DeploymentTemplateHistoryServiceImpl struct { cdWorkflowRepository pipelineConfig.CdWorkflowRepository variableSnapshotHistoryService variables.VariableSnapshotHistoryService variableTemplateParser parsers.VariableTemplateParser + scopedVariableService variables.ScopedVariableService } func NewDeploymentTemplateHistoryServiceImpl(logger *zap.SugaredLogger, deploymentTemplateHistoryRepository repository.DeploymentTemplateHistoryRepository, @@ -54,7 +57,9 @@ func NewDeploymentTemplateHistoryServiceImpl(logger *zap.SugaredLogger, deployme userService user.UserService, cdWorkflowRepository pipelineConfig.CdWorkflowRepository, variableSnapshotHistoryService 
variables.VariableSnapshotHistoryService, - variableTemplateParser parsers.VariableTemplateParser) *DeploymentTemplateHistoryServiceImpl { + variableTemplateParser parsers.VariableTemplateParser, + scopedVariableService variables.ScopedVariableService, +) *DeploymentTemplateHistoryServiceImpl { return &DeploymentTemplateHistoryServiceImpl{ logger: logger, deploymentTemplateHistoryRepository: deploymentTemplateHistoryRepository, @@ -67,6 +72,7 @@ func NewDeploymentTemplateHistoryServiceImpl(logger *zap.SugaredLogger, deployme cdWorkflowRepository: cdWorkflowRepository, variableSnapshotHistoryService: variableSnapshotHistoryService, variableTemplateParser: variableTemplateParser, + scopedVariableService: scopedVariableService, } } @@ -312,7 +318,7 @@ func (impl DeploymentTemplateHistoryServiceImpl) CheckIfHistoryExistsForPipeline return history.Id, true, nil } -func (impl DeploymentTemplateHistoryServiceImpl) GetDeployedHistoryByPipelineIdAndWfrId(pipelineId, wfrId int) (*HistoryDetailDto, error) { +func (impl DeploymentTemplateHistoryServiceImpl) GetDeployedHistoryByPipelineIdAndWfrId(ctx context.Context, pipelineId, wfrId int) (*HistoryDetailDto, error) { impl.logger.Debugw("received request, GetDeployedHistoryByPipelineIdAndWfrId", "pipelineId", pipelineId, "wfrId", wfrId) //checking if history exists for pipelineId and wfrId @@ -322,10 +328,14 @@ func (impl DeploymentTemplateHistoryServiceImpl) GetDeployedHistoryByPipelineIdA return nil, err } - variableSnapshotMap, err := impl.getVariableSnapshot(history.Id) + isSuperAdmin, err := util.GetIsSuperAdminFromContext(ctx) if err != nil { return nil, err } + variableSnapshotMap, resolvedTemplate, err := impl.getVariableSnapshotAndResolveTemplate(history.Template, history.Id, isSuperAdmin) + if err != nil { + impl.logger.Errorw("error while resolving template from history", "err", err, "wfrId", wfrId, "pipelineID", pipelineId) + } historyDto := &HistoryDetailDto{ TemplateName: history.TemplateName, @@ -335,28 +345,53 
@@ func (impl DeploymentTemplateHistoryServiceImpl) GetDeployedHistoryByPipelineIdA DisplayName: "values.yaml", Value: history.Template, }, - VariableSnapshot: variableSnapshotMap, + VariableSnapshot: variableSnapshotMap, + ResolvedTemplateData: resolvedTemplate, } return historyDto, nil } -func (impl DeploymentTemplateHistoryServiceImpl) getVariableSnapshot(historyId int) (map[string]string, error) { +func (impl DeploymentTemplateHistoryServiceImpl) getVariableSnapshotAndResolveTemplate(template string, historyId int, isSuperAdmin bool) (map[string]string, string, error) { reference := repository6.HistoryReference{ HistoryReferenceId: historyId, HistoryReferenceType: repository6.HistoryReferenceTypeDeploymentTemplate, } + variableSnapshotMap := make(map[string]string) references, err := impl.variableSnapshotHistoryService.GetVariableHistoryForReferences([]repository6.HistoryReference{reference}) if err != nil { - return nil, err + return variableSnapshotMap, template, err } - variableSnapshotMap := make(map[string]string) + if _, ok := references[reference]; ok { err = json.Unmarshal(references[reference].VariableSnapshot, &variableSnapshotMap) if err != nil { - return nil, err + return variableSnapshotMap, template, err } } - return variableSnapshotMap, nil + + if len(variableSnapshotMap) == 0 { + return variableSnapshotMap, template, err + } + + varNames := make([]string, 0) + for varName, _ := range variableSnapshotMap { + varNames = append(varNames, varName) + } + varNameToIsSensitive, err := impl.scopedVariableService.CheckForSensitiveVariables(varNames) + if err != nil { + return variableSnapshotMap, template, err + } + + scopedVariableData := parsers.GetScopedVarData(variableSnapshotMap, varNameToIsSensitive, isSuperAdmin) + request := parsers.VariableParserRequest{Template: template, TemplateType: parsers.JsonVariableTemplate, Variables: scopedVariableData, IgnoreUnknownVariables: true} + parserResponse := impl.variableTemplateParser.ParseTemplate(request) 
+ err = parserResponse.Error + if err != nil { + return variableSnapshotMap, template, err + } + resolvedTemplate := parserResponse.ResolvedTemplate + + return variableSnapshotMap, resolvedTemplate, nil } func (impl DeploymentTemplateHistoryServiceImpl) GetDeployedHistoryList(pipelineId, baseConfigId int) ([]*DeployedHistoryComponentMetadataDto, error) { @@ -380,29 +415,21 @@ func (impl DeploymentTemplateHistoryServiceImpl) GetDeployedHistoryList(pipeline return historyList, nil } -func (impl DeploymentTemplateHistoryServiceImpl) GetHistoryForDeployedTemplateById(id, pipelineId int) (*HistoryDetailDto, error) { +func (impl DeploymentTemplateHistoryServiceImpl) GetHistoryForDeployedTemplateById(ctx context.Context, id int, pipelineId int) (*HistoryDetailDto, error) { history, err := impl.deploymentTemplateHistoryRepository.GetHistoryForDeployedTemplateById(id, pipelineId) if err != nil { impl.logger.Errorw("error in getting deployment template history", "err", err, "id", id, "pipelineId", pipelineId) return nil, err } - variableSnapshotMap, err := impl.getVariableSnapshot(history.Id) + isSuperAdmin, err := util.GetIsSuperAdminFromContext(ctx) if err != nil { return nil, err } - resolvedTemplate := history.Template - if len(variableSnapshotMap) > 0 { - scopedVariableData := parsers.GetScopedVarData(variableSnapshotMap) - request := parsers.VariableParserRequest{Template: history.Template, TemplateType: parsers.JsonVariableTemplate, Variables: scopedVariableData, IgnoreUnknownVariables: true} - parserResponse := impl.variableTemplateParser.ParseTemplate(request) - err = parserResponse.Error - if err != nil { - return nil, err - } - resolvedTemplate = parserResponse.ResolvedTemplate + variableSnapshotMap, resolvedTemplate, err := impl.getVariableSnapshotAndResolveTemplate(history.Template, history.Id, isSuperAdmin) + if err != nil { + impl.logger.Errorw("error while resolving template from history", "err", err, "id", id, "pipelineID", pipelineId) } - historyDto := 
&HistoryDetailDto{ TemplateName: history.TemplateName, TemplateVersion: history.TemplateVersion, @@ -411,8 +438,8 @@ func (impl DeploymentTemplateHistoryServiceImpl) GetHistoryForDeployedTemplateBy DisplayName: "values.yaml", Value: history.Template, }, - VariableSnapshot: variableSnapshotMap, - ResolvedTemplate: resolvedTemplate, + VariableSnapshot: variableSnapshotMap, + ResolvedTemplateData: resolvedTemplate, } return historyDto, nil } diff --git a/pkg/pipeline/history/bean.go b/pkg/pipeline/history/bean.go index 80392c60c3..740d2747e4 100644 --- a/pkg/pipeline/history/bean.go +++ b/pkg/pipeline/history/bean.go @@ -50,16 +50,16 @@ type HistoryDetailDto struct { PipelineTriggerType pipelineConfig.TriggerType `json:"pipelineTriggerType,omitempty"` Strategy string `json:"strategy,omitempty"` //for configmap and secret - Type string `json:"type,omitempty"` - External *bool `json:"external,omitempty"` - MountPath string `json:"mountPath,omitempty"` - ExternalSecretType string `json:"externalType,omitempty"` - RoleARN string `json:"roleARN,omitempty"` - SubPath *bool `json:"subPath,omitempty"` - FilePermission string `json:"filePermission,omitempty"` - CodeEditorValue *HistoryDetailConfig `json:"codeEditorValue"` - VariableSnapshot map[string]string `json:"variableSnapshot"` - ResolvedTemplate string `json:"-"` + Type string `json:"type,omitempty"` + External *bool `json:"external,omitempty"` + MountPath string `json:"mountPath,omitempty"` + ExternalSecretType string `json:"externalType,omitempty"` + RoleARN string `json:"roleARN,omitempty"` + SubPath *bool `json:"subPath,omitempty"` + FilePermission string `json:"filePermission,omitempty"` + CodeEditorValue *HistoryDetailConfig `json:"codeEditorValue"` + VariableSnapshot map[string]string `json:"variableSnapshot"` + ResolvedTemplateData string `json:"resolvedTemplateData"` } type HistoryDetailConfig struct { diff --git a/pkg/pipeline/repository/PipelineStageRepository.go 
b/pkg/pipeline/repository/PipelineStageRepository.go index 56ce81b330..360a69840b 100644 --- a/pkg/pipeline/repository/PipelineStageRepository.go +++ b/pkg/pipeline/repository/PipelineStageRepository.go @@ -29,7 +29,7 @@ const ( PIPELINE_STAGE_STEP_VARIABLE_VALUE_TYPE_GLOBAL PipelineStageStepVariableValueType = "GLOBAL" PIPELINE_STAGE_STEP_CONDITION_TYPE_SKIP PipelineStageStepConditionType = "SKIP" PIPELINE_STAGE_STEP_CONDITION_TYPE_TRIGGER PipelineStageStepConditionType = "TRIGGER" - PIPELINE_STAGE_STEP_CONDITION_TYPE_SUCCESS PipelineStageStepConditionType = "SUCCESS" + PIPELINE_STAGE_STEP_CONDITION_TYPE_SUCCESS PipelineStageStepConditionType = "PASS" PIPELINE_STAGE_STEP_CONDITION_TYPE_FAIL PipelineStageStepConditionType = "FAIL" PIPELINE_STAGE_STEP_VARIABLE_FORMAT_TYPE_STRING PipelineStageStepVariableFormatType = "STRING" PIPELINE_STAGE_STEP_VARIABLE_FORMAT_TYPE_NUMBER PipelineStageStepVariableFormatType = "NUMBER" diff --git a/pkg/resourceQualifiers/bean.go b/pkg/resourceQualifiers/bean.go index fc6fe6e431..09a235e226 100644 --- a/pkg/resourceQualifiers/bean.go +++ b/pkg/resourceQualifiers/bean.go @@ -13,6 +13,7 @@ type SystemMetadata struct { ClusterName string Namespace string ImageTag string + Image string AppName string } @@ -28,6 +29,8 @@ func (metadata *SystemMetadata) GetDataFromSystemVariable(variable SystemVariabl return metadata.ImageTag case DevtronAppName: return metadata.AppName + case DevtronImage: + return metadata.Image } return "" } diff --git a/pkg/resourceQualifiers/constants.go b/pkg/resourceQualifiers/constants.go index f3541b84e0..cf0644e782 100644 --- a/pkg/resourceQualifiers/constants.go +++ b/pkg/resourceQualifiers/constants.go @@ -7,7 +7,15 @@ const ( DevtronClusterName SystemVariableName = "DEVTRON_CLUSTER_NAME" DevtronEnvName SystemVariableName = "DEVTRON_ENV_NAME" DevtronImageTag SystemVariableName = "DEVTRON_IMAGE_TAG" + DevtronImage SystemVariableName = "DEVTRON_IMAGE" DevtronAppName SystemVariableName = "DEVTRON_APP_NAME" ) -var 
SystemVariables = []SystemVariableName{DevtronNamespace, DevtronClusterName, DevtronEnvName, DevtronImageTag, DevtronAppName} +var SystemVariables = []SystemVariableName{ + DevtronNamespace, + DevtronClusterName, + DevtronEnvName, + DevtronImageTag, + DevtronAppName, + DevtronImage, +} diff --git a/pkg/user/casbin/rbac.go b/pkg/user/casbin/rbac.go index 189f6a3505..edc050169e 100644 --- a/pkg/user/casbin/rbac.go +++ b/pkg/user/casbin/rbac.go @@ -115,8 +115,7 @@ func (e *EnforcerImpl) Enforce(token string, resource string, action string, res } func (e *EnforcerImpl) EnforceByEmail(emailId string, resource string, action string, resourceItem string) bool { - allowed := e.enforceByEmail(emailId, resource, action, resourceItem) - return allowed + return e.enforceByEmail(emailId, resource, action, strings.ToLower(resourceItem)) } func (e *EnforcerImpl) ReloadPolicy() error { @@ -167,6 +166,10 @@ func (e *EnforcerImpl) EnforceByEmailInBatch(emailId string, resource string, ac batchRequestLock.Lock() defer batchRequestLock.Unlock() + for index, val := range vals { + vals[index] = strings.ToLower(val) + } + var metrics = make(map[int]int64) result, notFoundItemList := e.batchEnforceFromCache(emailId, resource, action, vals) if len(result) > 0 { diff --git a/pkg/util/artifact-utils.go b/pkg/util/artifact-utils.go new file mode 100644 index 0000000000..7841d6e117 --- /dev/null +++ b/pkg/util/artifact-utils.go @@ -0,0 +1,12 @@ +package util + +import "strings" + +func GetImageTagFromImage(image string) string { + parts := strings.Split(image, ":") + + if len(parts) < 1 { + return "" + } + return parts[len(parts)-1] +} diff --git a/pkg/variables/ScopedVariableService.go b/pkg/variables/ScopedVariableService.go index 692d1b42f6..03de59e610 100644 --- a/pkg/variables/ScopedVariableService.go +++ b/pkg/variables/ScopedVariableService.go @@ -2,6 +2,7 @@ package variables import ( "fmt" + "github.com/argoproj/argo-workflows/v3/errors" 
"github.com/caarlos0/env" "github.com/devtron-labs/devtron/pkg/devtronResource" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" @@ -22,8 +23,10 @@ import ( type ScopedVariableService interface { CreateVariables(payload models.Payload) error - GetScopedVariables(scope resourceQualifiers.Scope, varNames []string, maskSensitiveData bool) (scopedVariableDataObj []*models.ScopedVariableData, err error) + GetScopedVariables(scope resourceQualifiers.Scope, varNames []string, unmaskSensitiveData bool) (scopedVariableDataObj []*models.ScopedVariableData, err error) GetJsonForVariables() (*models.Payload, error) + CheckForSensitiveVariables(variableNames []string) (map[string]bool, error) + GetFormattedVariableForName(name string) string } type ScopedVariableServiceImpl struct { @@ -56,6 +59,7 @@ type VariableConfig struct { VariableNameRegex string `env:"SCOPED_VARIABLE_NAME_REGEX" envDefault:"^[a-zA-Z][a-zA-Z0-9_-]{0,62}[a-zA-Z0-9]$"` VariableCacheEnabled bool `env:"VARIABLE_CACHE_ENABLED" envDefault:"true"` SystemVariablePrefix string `env:"SYSTEM_VAR_PREFIX" envDefault:"DEVTRON_"` + ScopedVariableFormat string `env:"SCOPED_VARIABLE_FORMAT" envDefault:"@{{%s}}"` } func loadVariableCache(cfg *VariableConfig, service *ScopedVariableServiceImpl) { @@ -86,6 +90,43 @@ func (impl *ScopedVariableServiceImpl) loadVarCache() { impl.logger.Info("variable cache loaded successfully") } +func (impl *ScopedVariableServiceImpl) GetFormattedVariableForName(name string) string { + return fmt.Sprintf(impl.VariableNameConfig.ScopedVariableFormat, name) +} + +func (impl *ScopedVariableServiceImpl) CheckForSensitiveVariables(variableNames []string) (map[string]bool, error) { + + // getting all variables from cache + allVariableDefinitions := impl.VariableCache.GetData() + + var err error + // cache is not loaded get from repo + if allVariableDefinitions == nil { + allVariableDefinitions, err = 
impl.scopedVariableRepository.GetVariableTypeForVariableNames(variableNames) + if err != nil { + return nil, errors.Wrap(err, "400", "error in fetching variable type") + } + } + + variableNameToType := make(map[string]models.VariableType) + for _, definition := range allVariableDefinitions { + variableNameToType[definition.Name] = definition.VarType + } + + varNameToIsSensitive := make(map[string]bool) + for _, name := range variableNames { + + // by default all variables are marked sensitive to handle deleted variables + // only super admin will be able to see the values once variable is deleted from system + if varType, ok := variableNameToType[name]; ok { + varNameToIsSensitive[name] = varType.IsTypeSensitive() + } else { + varNameToIsSensitive[name] = true + } + } + return varNameToIsSensitive, nil +} + func (impl *ScopedVariableServiceImpl) CreateVariables(payload models.Payload) error { err, _ := impl.isValidPayload(payload) if err != nil { @@ -319,7 +360,7 @@ func (impl *ScopedVariableServiceImpl) selectScopeForCompoundQualifier(scopes [] return selectedParentScope } -func (impl *ScopedVariableServiceImpl) GetScopedVariables(scope resourceQualifiers.Scope, varNames []string, maskSensitiveData bool) (scopedVariableDataObj []*models.ScopedVariableData, err error) { +func (impl *ScopedVariableServiceImpl) GetScopedVariables(scope resourceQualifiers.Scope, varNames []string, unmaskSensitiveData bool) (scopedVariableDataObj []*models.ScopedVariableData, err error) { //populating system variables from system metadata var systemVariableData, allSystemVariables []*models.ScopedVariableData @@ -336,7 +377,7 @@ func (impl *ScopedVariableServiceImpl) GetScopedVariables(scope resourceQualifie return scopedVariableDataObj, nil } - // Need to get from repo for isSensitive even if cache is loaded since cache only contains metadata + // Cache is not loaded if allVariableDefinitions == nil { allVariableDefinitions, err = impl.scopedVariableRepository.GetAllVariables() @@ 
-356,18 +397,22 @@ func (impl *ScopedVariableServiceImpl) GetScopedVariables(scope resourceQualifie } variableIds := make([]int, 0) - variableIdToDefinition := make(map[int]*repository2.VariableDefinition) for _, definition := range variableDefinitions { variableIds = append(variableIds, definition.Id) - variableIdToDefinition[definition.Id] = definition } - // This to prevent corner case where no variables were found for the provided names if len(varNames) > 0 && len(variableIds) == 0 { return scopedVariableDataObj, nil } - varScope, err := impl.qualifierMappingService.GetQualifierMappings(resourceQualifiers.Variable, &scope, variableIds) + allVariableIds := make([]int, 0) + variableIdToDefinition := make(map[int]*repository2.VariableDefinition) + for _, definition := range allVariableDefinitions { + allVariableIds = append(allVariableIds, definition.Id) + variableIdToDefinition[definition.Id] = definition + } + + varScope, err := impl.qualifierMappingService.GetQualifierMappings(resourceQualifiers.Variable, &scope, allVariableIds) if err != nil { impl.logger.Errorw("error in getting varScope", "err", err) return nil, err @@ -405,8 +450,8 @@ func (impl *ScopedVariableServiceImpl) GetScopedVariables(scope resourceQualifie var varValue *models.VariableValue var isRedacted bool - if !maskSensitiveData && variableIdToDefinition[varId].VarType == models.PRIVATE { - varValue = &models.VariableValue{Value: ""} + if !unmaskSensitiveData && variableIdToDefinition[varId].VarType == models.PRIVATE { + varValue = &models.VariableValue{Value: models.HiddenValue} isRedacted = true } else { varValue = &models.VariableValue{Value: value} @@ -420,20 +465,30 @@ func (impl *ScopedVariableServiceImpl) GetScopedVariables(scope resourceQualifie scopedVariableDataObj = append(scopedVariableDataObj, scopedVariableData) } + allScopedVariableDataObj := scopedVariableDataObj + usedScopedVariableDataObj := make([]*models.ScopedVariableData, 0) + for _, data := range scopedVariableDataObj { + 
if varNames == nil || slices.Contains(varNames, data.VariableName) { + usedScopedVariableDataObj = append(usedScopedVariableDataObj, data) + } + } + //adding variable def for variables which don't have any scoped data defined // This only happens when passed var names is null (called from UI to get all variables with or without data) if varNames == nil { for _, definition := range allVariableDefinitions { if !slices.Contains(foundVarIds, definition.Id) { - scopedVariableDataObj = append(scopedVariableDataObj, &models.ScopedVariableData{ + usedScopedVariableDataObj = append(usedScopedVariableDataObj, &models.ScopedVariableData{ VariableName: definition.Name, ShortDescription: definition.ShortDescription, }) } } } - impl.deduceVariables(scopedVariableDataObj, allSystemVariables) - return scopedVariableDataObj, err + + allScopedVariableDataObj = append(allScopedVariableDataObj, allSystemVariables...) + impl.deduceVariables(usedScopedVariableDataObj, allScopedVariableDataObj) + return usedScopedVariableDataObj, err } func resolveExpressionWithVariableValues(expr string, varNameToData map[string]*models.ScopedVariableData) (string, error) { diff --git a/pkg/variables/ScopedVariableValidator.go b/pkg/variables/ScopedVariableValidator.go index cffe5983ab..8bb674b960 100644 --- a/pkg/variables/ScopedVariableValidator.go +++ b/pkg/variables/ScopedVariableValidator.go @@ -30,6 +30,11 @@ func (impl *ScopedVariableServiceImpl) isValidPayload(payload models.Payload) (e variableNamesList = append(variableNamesList, variable.Definition.VarName) uniqueVariableMap := make(map[string]interface{}) for _, attributeValue := range variable.AttributeValues { + + if !utils.IsStringType(attributeValue.VariableValue.Value) && variable.Definition.VarType.IsTypeSensitive() { + return models.ValidationError{Err: fmt.Errorf("data type other than string cannot be sensitive")}, false + } + validIdentifierTypeList := helper.GetIdentifierTypeFromAttributeType(attributeValue.AttributeType) if 
len(validIdentifierTypeList) != len(attributeValue.AttributeParams) { return models.ValidationError{Err: fmt.Errorf("attribute selectors are not valid for given category %s", attributeValue.AttributeType)}, false diff --git a/pkg/variables/models/variable-payload.go b/pkg/variables/models/variable-payload.go index 3ee5e522b6..d5a9885afb 100644 --- a/pkg/variables/models/variable-payload.go +++ b/pkg/variables/models/variable-payload.go @@ -42,6 +42,9 @@ const ( PRIMITIVE_TYPE DataType = "primitive" ) +const HiddenValue = "hidden-value" +const UndefinedValue = "undefined-variable-value" + func (variableType VariableType) IsTypeSensitive() bool { if variableType == PRIVATE { return true @@ -74,3 +77,17 @@ func (value VariableValue) StringValue() string { } return value.Value.(string) } + +func GetInterfacedValue(input string) interface{} { + var interfaceValue interface{} + if intValue, err := strconv.Atoi(input); err == nil { + interfaceValue = intValue + } else if floatValue, err := strconv.ParseFloat(input, 64); err == nil { + interfaceValue = floatValue + } else if boolValue, err := strconv.ParseBool(input); err == nil { + interfaceValue = boolValue + } else { + interfaceValue = input + } + return interfaceValue +} diff --git a/pkg/variables/parsers/VariableTemplateParser.go b/pkg/variables/parsers/VariableTemplateParser.go index a745e18441..6fab245a6c 100644 --- a/pkg/variables/parsers/VariableTemplateParser.go +++ b/pkg/variables/parsers/VariableTemplateParser.go @@ -40,12 +40,14 @@ func NewVariableTemplateParserImpl(logger *zap.SugaredLogger) (*VariableTemplate return impl, nil } -const VariableRegex = `@\{\{[a-zA-Z0-9-+/*%_\s]+\}\}` -const VariableSubRegexWithQuotes = `\"@{{([a-zA-Z0-9-+/*%_\s]+)}}\"` - type VariableTemplateParserConfig struct { - ScopedVariableEnabled bool `env:"SCOPED_VARIABLE_ENABLED" envDefault:"false"` - ScopedVariableHandlePrimitives bool `env:"SCOPED_VARIABLE_HANDLE_PRIMITIVES" envDefault:"false"` + ScopedVariableEnabled bool 
`env:"SCOPED_VARIABLE_ENABLED" envDefault:"false"` + ScopedVariableHandlePrimitives bool `env:"SCOPED_VARIABLE_HANDLE_PRIMITIVES" envDefault:"false"` + VariableExpressionRegex string `env:"VARIABLE_EXPRESSION_REGEX" envDefault:"@{{([^}]+)}}"` +} + +func (cfg VariableTemplateParserConfig) isScopedVariablesDisabled() bool { + return !cfg.ScopedVariableEnabled } func getVariableTemplateParserConfig() (*VariableTemplateParserConfig, error) { @@ -54,10 +56,22 @@ func getVariableTemplateParserConfig() (*VariableTemplateParserConfig, error) { return cfg, err } -func preProcessPlaceholder(template string, variableValueMap map[string]interface{}) string { +func getRegexSubMatches(regex string, input string) [][]string { + re := regexp.MustCompile(regex) + matches := re.FindAllStringSubmatch(input, -1) + return matches +} + +const quote = "\"" +const escapedQuote = `\\"` + +func (impl *VariableTemplateParserImpl) preProcessPlaceholder(template string, variableValueMap map[string]interface{}) string { - re := regexp.MustCompile(VariableSubRegexWithQuotes) - matches := re.FindAllStringSubmatch(template, -1) + variableSubRegexWithQuotes := quote + impl.variableTemplateParserConfig.VariableExpressionRegex + quote + variableSubRegexWithEscapedQuotes := escapedQuote + impl.variableTemplateParserConfig.VariableExpressionRegex + escapedQuote + + matches := getRegexSubMatches(variableSubRegexWithQuotes, template) + matches = append(matches, getRegexSubMatches(variableSubRegexWithEscapedQuotes, template)...) 
// Replace the surrounding quotes for variables whose value is known // and type is primitive @@ -76,30 +90,23 @@ func preProcessPlaceholder(template string, variableValueMap map[string]interfac func (impl *VariableTemplateParserImpl) ParseTemplate(parserRequest VariableParserRequest) VariableParserResponse { - if !impl.variableTemplateParserConfig.ScopedVariableEnabled { - return VariableParserResponse{ - Request: parserRequest, - ResolvedTemplate: parserRequest.Template, - } + if impl.variableTemplateParserConfig.isScopedVariablesDisabled() { + return parserRequest.GetEmptyResponse() } - - if impl.variableTemplateParserConfig.ScopedVariableHandlePrimitives && parserRequest.TemplateType == JsonVariableTemplate { - - var variableToValue = make(map[string]interface{}, 0) - for _, variable := range parserRequest.Variables { - variableToValue[variable.VariableName] = variable.VariableValue.Value - } - template := preProcessPlaceholder(parserRequest.Template, variableToValue) - request := VariableParserRequest{ - TemplateType: StringVariableTemplate, - Template: template, - Variables: parserRequest.Variables, - IgnoreUnknownVariables: parserRequest.IgnoreUnknownVariables, - } - return impl.parseTemplate(request) - } else { - return impl.parseTemplate(parserRequest) + request := parserRequest + if impl.handlePrimitivesForJson(parserRequest) { + variableToValue := parserRequest.GetOriginalValuesMap() + template := impl.preProcessPlaceholder(parserRequest.Template, variableToValue) + + //overriding request to handle primitives in json request + request.TemplateType = StringVariableTemplate + request.Template = template } + return impl.parseTemplate(request) +} + +func (impl *VariableTemplateParserImpl) handlePrimitivesForJson(parserRequest VariableParserRequest) bool { + return impl.variableTemplateParserConfig.ScopedVariableHandlePrimitives && parserRequest.TemplateType == JsonVariableTemplate } func (impl *VariableTemplateParserImpl) ExtractVariables(template string, 
templateType VariableTemplateType) ([]string, error) { @@ -275,6 +282,7 @@ func (impl *VariableTemplateParserImpl) getDefaultMappedFunc() map[string]functi "upper": stdlib.UpperFunc, "toInt": stdlib.IntFunc, "toBool": ParseBoolFunc, + "split": stdlib.SplitFunc, } } @@ -315,7 +323,7 @@ func (impl *VariableTemplateParserImpl) diluteExistingHclVars(template string, t func (impl *VariableTemplateParserImpl) convertToHclExpression(template string) string { - var devtronRegexCompiledPattern = regexp.MustCompile(VariableRegex) //TODO KB: add support of Braces () also + var devtronRegexCompiledPattern = regexp.MustCompile(impl.variableTemplateParserConfig.VariableExpressionRegex) indexesData := devtronRegexCompiledPattern.FindAllIndex([]byte(template), -1) var strBuilder strings.Builder strBuilder.Grow(len(template)) diff --git a/pkg/variables/parsers/bean.go b/pkg/variables/parsers/bean.go index c883449286..3d9cc4db84 100644 --- a/pkg/variables/parsers/bean.go +++ b/pkg/variables/parsers/bean.go @@ -24,6 +24,13 @@ type VariableParserRequest struct { IgnoreUnknownVariables bool } +func (request VariableParserRequest) GetEmptyResponse() VariableParserResponse { + return VariableParserResponse{ + Request: request, + ResolvedTemplate: request.Template, + } +} + type VariableParserResponse struct { Request VariableParserRequest ResolvedTemplate string @@ -40,10 +47,23 @@ func (request VariableParserRequest) GetValuesMap() map[string]string { return variablesMap } -func GetScopedVarData(varData map[string]string) []*models.ScopedVariableData { +func (request VariableParserRequest) GetOriginalValuesMap() map[string]interface{} { + var variableToValue = make(map[string]interface{}, 0) + for _, variable := range request.Variables { + variableToValue[variable.VariableName] = variable.VariableValue.Value + } + return variableToValue +} + +func GetScopedVarData(varData map[string]string, nameToIsSensitive map[string]bool, isSuperAdmin bool) []*models.ScopedVariableData { 
scopedVarData := make([]*models.ScopedVariableData, 0) for key, value := range varData { - scopedVarData = append(scopedVarData, &models.ScopedVariableData{VariableName: key, VariableValue: &models.VariableValue{Value: value}}) + + finalValue := value + if !isSuperAdmin && nameToIsSensitive[key] { + finalValue = models.HiddenValue + } + scopedVarData = append(scopedVarData, &models.ScopedVariableData{VariableName: key, VariableValue: &models.VariableValue{Value: models.GetInterfacedValue(finalValue)}}) } return scopedVarData } diff --git a/pkg/variables/repository/ScopedVariableRepository.go b/pkg/variables/repository/ScopedVariableRepository.go index 4615c878bf..fc6023a48b 100644 --- a/pkg/variables/repository/ScopedVariableRepository.go +++ b/pkg/variables/repository/ScopedVariableRepository.go @@ -23,6 +23,8 @@ type ScopedVariableRepository interface { // Delete DeleteVariables(auditLog sql.AuditLog, tx *pg.Tx) error + + GetVariableTypeForVariableNames(variableNames []string) ([]*VariableDefinition, error) } type ScopedVariableRepositoryImpl struct { @@ -73,6 +75,20 @@ func (impl *ScopedVariableRepositoryImpl) GetAllVariableMetadata() ([]*VariableD return variableDefinition, err } +func (impl *ScopedVariableRepositoryImpl) GetVariableTypeForVariableNames(variableNames []string) ([]*VariableDefinition, error) { + variableDefinition := make([]*VariableDefinition, 0) + err := impl. + dbConnection.Model(&variableDefinition). + Column("name", "var_type"). + Where("active = ?", true). + Where("name in (?)", pg.In(variableNames)). + Select() + if err == pg.ErrNoRows { + err = nil + } + return variableDefinition, err +} + func (impl *ScopedVariableRepositoryImpl) GetVariablesForVarIds(ids []int) ([]*VariableDefinition, error) { var variableDefinition []*VariableDefinition err := impl. 
diff --git a/releasenotes.md b/releasenotes.md index 56918bb5d4..e7cc215a5c 100644 --- a/releasenotes.md +++ b/releasenotes.md @@ -1,38 +1,74 @@ -## v0.6.22 +## v0.6.23 ## Bugs -- fix: updated adapter for cluster object (#3900) -- fix: rbac-modification for cluster list (#3767) -- fix: Helm app deployment history page breaking due to user details not found (#3873) -- fix: ci pip status query optmization (#3877) -- fix: migration script for virtual cluster v3 (#3870) -- fix: cloning app cmcs global boolean value fix (#3862) -- fix: Makefile correction (#3852) -- fix: deleting pipeline stage and related data if no stage steps are found (#3832) -- fix: Port number fix in helm app (#3843) -- fix: External cm and secret in jobs not getting added as env variable in container (#3815) -- fix: pre-cd pod not getting scheduled when node affinity is not present in external cluster. (#3806) -- fix: k8s permission and chart-group permission not getting deleted from orchestrator (#3824) -- fix: added missing audit logs while deleting cd pipeline (#3822) +- fix: DT19-v1 bug fixes (#3962) +- fix: ci pod request correction (#3980) +- fix: pipelineOverride id being sent instead of pipelineId (#3984) +- fix: Iam role handling script for plugin pull image from CR (#3955) +- fix: Deployment Template HCL parsing with % keyword (#4012) +- fix: handled releaseNotExists case for helm type cd pipeline resource tree fetch (#4016) +- fix: auto post cd not working in case of multiple parallel gitOps pipeline (#4018) +- fix: handled error in bulk trigger deploy (#4034) +- fix: The manager(non-admin user) of the application is unable to select a list of apps when assigning permissions (#4053) +- fix: ci job handling in app create api (#4054) +- fix: Deploying currently Active image using TriggerDeploy API from devtctl tool is broken (#4056) +- fix: Unable to delete ci pipeline in case you configure multi git (#4072) +- fix: env for specific deployment (#4085) +- fix: update build configuration 
fix (#4093) +- fix: Artifacts filter in CD trigger view (#4064) +- fix: Bugathon DT-19 version-2 fixes (#4105) +- fix: App Labels node selector not getting attach in ci-workflow (#4084) +- fix: Update cd pipeline create empty pre post cd steps (#4113) +- fix: normal Refresh after triggering gitops deployment to avoid sync delay in argo (#4066) +- fix: helm chart delete when no rows are found (#4124) +- fix: Unable to abort pre-cd and post-cd workflow (#4121) +- fix: Helm Apps permissions do not allow Terminal or Logs view (#4110) +- fix: port service mapping (#4132) ## Enhancements -- feat: added new statefulset-5-0-0 chart in reference chart (#3909) -- feat: added configurable provenance flag for buildx builds (#3905) -- feat: deployment history release not found err handling (#3811) -- feat: added validation for create app workflow API (#3842) -- feat: custom chart download (#3801) -- feat: Virtual cluster v3 (#3764) -- feat: Maintaining audit logs (#3763) -- feat: Capability to block deployments in case of vulnerabilities only if FIXED IN VERSION available (#3796) +- feat: Helm async install (#3856) +- feat: handle CI success event auto trigger in batch (#3951) +- feat: added env variable to skip gitops validation on create/update (#3956) +- feat: added flag to configure ecr repo creation (#3963) +- feat: Ability to change branch for all selected applications during bulk build from Application Groups (#3955) +- feat: Variables support in pre-post CI, CD and Jobs (#3911) +- feat: Poll Images from ECR Container Repository Plugin (#3971) +- feat: resource groups CRUD and environment filtering (#3974) +- feat: Scoped variables primitive handling (#4033) +- feat: adding DEVTRON_APP_NAME system variable for deployment template (#4041) +- feat: wf pod restart (#3892) +- feat: added deduction for system variables (#4075) +- feat: manifest comparision (#3844) +- feat: multiple images handling for single workflow for ECR Plugin Poll Images (#4027) +- feat: Jenkins plugin 
migration (#4039) +- feat: clone cd pipelines while cloning app across project (#4087) ## Documentation -- doc: draft version of Graviton benchmark (#3890) -- doc: Okta SSO Configuration Doc (#3876) +- doc: Glossary of jargonish terms for layman in the context of Devtron (#3820) +- docs: Ephemeral Container Doc (#3912) +- docs: New Image Alignment in Ephemeral doc (#3959) +- docs: Snapshot updation in PVC docs + PreBuild CI-CD (#3964) +- doc: Fixed issuer url in okta docs (#4062) +- docs: Config Approval Draft (#3981) +- docs: Modified Existing Container Registry Doc (#4048) +- docs: Added OCI Pull in Usecases (#4112) ## Others -- chore: changes for migration no conflict (#3919) -- chore: Changed in Docker file for SQL file (#3904) -- chore: adjust duplicate action threshold (#3879) -- chore: find potential-duplicate issues (#3858) -- chore: Update pr-issue-validator.yaml (#3849) - - +- chore: added workflow to escalate pager-duty issue (#3927) +- chore: changed loop from for to while (#3928) +- chore: scheduled escalate pager duty issue workflow (#3933) +- chore: added log config for dev mode (#3953) +- chore: minor correction in devtron reference charts (#3957) +- chore: workflow refactoring (#3714) +- chore: pr-issue-validator permissions fix (#3967) +- chore: added CODEOWNERS (#3966) +- chore: Scoped variable refactoring (#3977) +- chore: modified labels of keda autoscale object in deployment chart (#3999) +- chore: Update pr-issue-validator.yaml (#3854) +- chore: refactoring around PipelineBuilder (#4043) +- chore: moved k8s library to common-lib and added scripts for adding sshTunnel config to clusters (#3848) +- chore: Add pager-duty issue template (#3988) +- chore: first cut refactor ci-pipeline (#4091) +- chore: refactored appartifact manager and cimaterialconfigservice (#4096) +- chore: Remove the EnvVariablesFromFieldPath from values.yaml in refcharts (#4111) +- chore: Updated schema for Scope Variable (#4079) +- chore: skip validation for release PRs 
(#4128) diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-2-0/schema.json b/scripts/devtron-reference-helm-charts/cronjob-chart_1-2-0/schema.json index 5761da51aa..0a29c23603 100644 --- a/scripts/devtron-reference-helm-charts/cronjob-chart_1-2-0/schema.json +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-2-0/schema.json @@ -11,7 +11,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -26,22 +30,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -55,7 +75,11 @@ "title": "Environment Variables" }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -76,7 +100,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the 
maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -98,17 +126,29 @@ } }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -118,34 +158,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -166,7 +230,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -188,17 +256,29 @@ } }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": 
"integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -208,17 +288,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -264,7 +356,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -292,27 +388,47 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -330,7 +446,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -358,7 +478,11 @@ "enum": ["Forbid","Allow"] }, "failedJobsHistoryLimit": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "", "title": "Failed Job History Limit" }, @@ -374,17 +498,29 @@ "title": "Schedule" }, "startingDeadlineSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "if a CronJob controller cannot start a job run on its schedule, it will keep retrying until this value is reached", "title": "Starting Deadline Seconds" }, "successfulJobsHistoryLimit": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "specifies how many completed and failed jobs should be kept", "title": "Successful Jobs History Limit" }, "suspend": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to suspend subsequent executions, does not apply on already started executions", "title": "Suspend", "default": false @@ -397,7 +533,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + 
"boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -510,7 +650,11 @@ } }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -559,7 +703,11 @@ "title": "Annotations" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -617,42 +765,70 @@ "title": "Job Configs", "properties": { "activeDeadlineSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "once a Job reaches activeDeadlineSeconds, all of its running Pods are terminated", "title": "Active Deadline Seconds" }, "backoffLimit": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "number of retries after which a job is failed", "title": "BackOff Limit", "default": 6 }, "completions": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for getting fixed completion count Job", "title": "Completions" }, "parallelism": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to run aKubernetes Job with multiple parallel worker processes in a given pod", "title": "Parallelism", "default": 1 }, "suspend": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to suspend subsequent executions, does not apply on already started executions", "title": "Suspend", "default": false }, "ttlSecondsAfterFinished": { - "type": "integer", + "type": [ + "integer", + "string" 
+ ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "feature used for clean up of finished Jobs (Complete or Failed)", "title": "TTL Seconds After Finished" } } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -684,7 +860,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -820,7 +1000,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/schema.json b/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/schema.json index 61a2b36a7b..c50940d861 100644 --- a/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/schema.json +++ b/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/schema.json @@ -16,7 +16,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -31,22 +35,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port 
of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -82,7 +102,11 @@ ] }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -103,7 +127,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -114,17 +142,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -134,34 +174,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -182,7 +246,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -193,17 +261,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -213,17 +293,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -269,7 +361,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -297,22 +393,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -322,7 +434,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -350,7 +466,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -383,7 +503,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -538,7 +662,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -598,7 +726,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -719,7 +851,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -777,7 +913,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -939,7 +1079,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/values.yaml index 31e7c1360e..be1cc73050 100644 --- 
a/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/deployment-chart_1-0-0/values.yaml @@ -126,9 +126,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: -- name: POD_NAME - fieldPath: metadata.name +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/schema.json b/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/schema.json index 3b8b371c5b..7cb1b96315 100644 --- a/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/schema.json +++ b/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/schema.json @@ -16,7 +16,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -31,27 +35,47 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "nodeport of the corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -87,7 +111,11 @@ ] }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -108,7 +136,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -119,17 +151,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -139,34 +183,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -187,7 +255,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -198,17 +270,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -218,17 +302,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -284,12 +380,20 @@ "title": "CORS" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -335,7 +439,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -363,22 +471,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": 
"Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -388,7 +512,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -416,7 +544,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -449,7 +581,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -604,7 +740,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -664,7 +804,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", 
"title": "Enabled" }, @@ -785,7 +929,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -843,7 +991,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1005,7 +1157,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/values.yaml index d8e740b5e5..d934a0abbc 100644 --- a/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/deployment-chart_1-1-0/values.yaml @@ -280,9 +280,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: -- name: POD_NAME - fieldPath: metadata.name +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-18-0/schema.json b/scripts/devtron-reference-helm-charts/deployment-chart_4-18-0/schema.json index c58f0bdf97..6a332631a9 100644 --- a/scripts/devtron-reference-helm-charts/deployment-chart_4-18-0/schema.json +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-18-0/schema.json @@ -16,7 +16,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -31,27 +35,47 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "nodeport of the corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -141,7 +165,11 @@ ] }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -162,7 +190,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -173,17 +205,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -193,34 +237,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -241,7 +309,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -252,17 +324,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -272,17 +356,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -393,12 +489,20 @@ "title": "CORS" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -444,7 +548,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -472,22 +580,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": 
"Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -497,7 +621,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -525,7 +653,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -558,7 +690,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -717,7 +853,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -788,7 +928,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" 
}, @@ -848,7 +992,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -969,7 +1117,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -994,7 +1146,17 @@ } } }, - "podExtraSpecs":{ + "deploymentLabels": { + "type": "object", + "description": "deploymentLabels is an object to define the label on deployment.", + "title": "DeploymentLabels" + }, + "deploymentAnnotations": { + "type": "object", + "description": "deploymentAnnotations is an object to define the annotations on deployment.", + "title": "DeploymentAnnotations" + }, + "podExtraSpecs":{ "type": "object", "description": "ExtraSpec for the pods to be configured", "title": "podExtraSpecs" @@ -1027,7 +1189,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1189,7 +1355,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_3-10-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_3-10-0/schema.json index 070eb2c8d8..026d009169 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_3-10-0/schema.json +++ 
b/scripts/devtron-reference-helm-charts/reference-chart_3-10-0/schema.json @@ -11,7 +11,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -26,22 +30,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -55,7 +75,11 @@ "title": "Environment Variables" }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -76,7 +100,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -98,17 +126,29 @@ } }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -118,34 +158,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -166,7 +230,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -188,17 +256,29 @@ } }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": 
"integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -208,17 +288,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -264,7 +356,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -292,27 +388,47 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -340,7 +456,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -362,7 +482,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -479,7 +603,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -511,7 +639,11 @@ "title": "Annotations" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -530,7 +662,11 @@ "title": "Init Containers" }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -552,7 +688,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -688,7 +828,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_3-11-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_3-11-0/schema.json index 875b922249..6dcd0150b5 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_3-11-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_3-11-0/schema.json @@ -11,7 +11,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -26,22 +30,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -55,7 +75,11 @@ "title": "Environment Variables" }, 
"GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -76,7 +100,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -98,17 +126,29 @@ } }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -118,34 +158,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -166,7 +230,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -188,17 +256,29 @@ } }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": 
"integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -208,17 +288,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -264,7 +356,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -292,27 +388,47 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -340,7 +456,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -362,7 +482,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -481,7 +605,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -530,7 +658,11 @@ "title": "Annotations" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -576,7 +708,11 @@ "title": "Init Containers" }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -608,7 +744,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -744,7 +884,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_3-12-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_3-12-0/schema.json index 6dd78d8cb7..37e7a2a7b7 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_3-12-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_3-12-0/schema.json @@ -11,7 +11,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -26,22 +30,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -55,7 +75,11 @@ "title": "Environment Variables" }, 
"GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -76,7 +100,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -98,17 +126,29 @@ } }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -118,34 +158,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -166,7 +230,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -188,17 +256,29 @@ } }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": 
"integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -208,17 +288,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -264,7 +356,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -292,27 +388,47 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -340,7 +456,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -362,7 +482,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -481,7 +605,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -530,7 +658,11 @@ "title": "Annotations" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -576,7 +708,11 @@ "title": "Init Containers" }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -608,7 +744,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -744,7 +884,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_3-13-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_3-13-0/schema.json index 07558526be..83959192f6 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_3-13-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_3-13-0/schema.json @@ -11,7 +11,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -26,22 +30,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -55,7 +75,11 @@ "title": "Environment Variables" }, 
"GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -76,7 +100,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -98,17 +126,29 @@ } }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -118,34 +158,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -166,7 +230,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -188,17 +256,29 @@ } }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": 
"integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -208,17 +288,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -264,7 +356,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -292,27 +388,47 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -340,7 +456,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -362,7 +482,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -481,7 +605,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -530,7 +658,11 @@ "title": "Annotations" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -576,7 +708,11 @@ "title": "Init Containers" }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -608,7 +744,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -744,7 +884,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_3-9-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_3-9-0/schema.json index d9a6d5bd2b..b36b76fedc 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_3-9-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_3-9-0/schema.json @@ -11,7 +11,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -26,22 +30,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -55,7 +75,11 @@ "title": "Environment Variables" }, 
"GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -76,7 +100,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -98,17 +126,29 @@ } }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -118,34 +158,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -166,7 +230,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -188,17 +256,29 @@ } }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": 
"integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -208,17 +288,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -264,7 +356,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -292,27 +388,47 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -340,7 +456,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -362,7 +482,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -479,7 +603,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -511,7 +639,11 @@ "title": "Annotations" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -530,7 +662,11 @@ "title": "Init Containers" }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -552,7 +688,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -688,7 +828,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-10-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-10-0/schema.json index aa2dc38e2b..df09be02c8 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-10-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-10-0/schema.json @@ -11,7 +11,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -26,22 +30,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -55,7 +75,11 @@ "title": "Environment Variables" }, 
"GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -76,7 +100,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -87,17 +115,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -107,34 +147,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -155,7 +219,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -166,17 +234,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -186,17 +266,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -242,7 +334,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -270,22 +366,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -295,7 +407,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -323,7 +439,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -351,7 +471,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -460,7 +584,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -520,7 +648,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -571,7 +703,11 @@ "title": "Init Containers" }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -603,7 +739,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + 
"integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -739,7 +879,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json index fcc54107ed..2c65f79d3d 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json @@ -11,7 +11,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -26,22 +30,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -55,7 +75,11 @@ 
"title": "Environment Variables" }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -76,7 +100,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -87,17 +115,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -107,34 +147,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -155,7 +219,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -166,17 +234,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -186,17 +266,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -242,7 +334,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -270,22 +366,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -295,7 +407,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -323,7 +439,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -356,7 +476,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -465,7 +589,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -525,7 +653,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -619,7 +751,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -656,7 +792,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -798,7 +938,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-12-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-12-0/schema.json index 01711de132..14c0d9645d 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-12-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-12-0/schema.json @@ -11,7 +11,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -26,22 +30,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -55,7 +75,11 @@ "title": "Environment 
Variables" }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -76,7 +100,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -87,17 +115,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -107,34 +147,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -155,7 +219,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -166,17 +234,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -186,17 +266,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -242,7 +334,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -270,22 +366,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -295,7 +407,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -323,7 +439,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -356,7 +476,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -465,7 +589,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -525,7 +653,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -619,7 +751,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -656,7 +792,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -818,7 +958,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-13-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-13-0/schema.json index 48585478f7..dafb60372d 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-13-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-13-0/schema.json @@ -11,7 +11,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -26,22 +30,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -55,7 +75,11 @@ "title": "Environment 
Variables" }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -76,7 +100,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -87,17 +115,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -107,34 +147,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -155,7 +219,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -166,17 +234,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -186,17 +266,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -242,7 +334,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -296,22 +392,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -321,7 +433,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -349,7 +465,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -382,7 +502,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -491,7 +615,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -551,7 +679,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -645,7 +777,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -682,7 +818,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -844,7 +984,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/schema.json index 2beea7006c..81591200f5 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/schema.json @@ -11,7 +11,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -26,22 +30,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -55,7 +75,11 @@ "title": "Environment 
Variables" }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -76,7 +100,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -87,17 +115,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -107,34 +147,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -155,7 +219,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -166,17 +234,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -186,17 +266,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -242,7 +334,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -296,22 +392,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -321,7 +433,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -349,7 +465,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -382,7 +502,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -491,7 +615,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -551,7 +679,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -645,7 +777,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -682,7 +818,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -844,7 +984,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/values.yaml index 4ef8a04163..67e33f31eb 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-14-0/values.yaml @@ -125,9 +125,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: -- name: POD_NAME - fieldPath: metadata.name +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/schema.json index d4c636b588..081e95cb05 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/schema.json @@ -16,7 +16,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -31,22 +35,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of 
the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -82,7 +102,11 @@ ] }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -103,7 +127,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -114,17 +142,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -134,34 +174,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -182,7 +246,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -193,17 +261,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -213,17 +293,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -269,7 +361,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -297,22 +393,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -322,7 +434,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -350,7 +466,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -383,7 +503,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -538,7 +662,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -598,7 +726,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -719,7 +851,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -777,7 +913,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -939,7 +1079,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/values.yaml index 28c9563974..2a8b11d47c 100644 --- 
a/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-15-0/values.yaml @@ -125,9 +125,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: -- name: POD_NAME - fieldPath: metadata.name +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/schema.json index d4c636b588..081e95cb05 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/schema.json @@ -16,7 +16,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -31,22 +35,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -82,7 +102,11 @@ ] }, "GracePeriod": { - "type": "integer", + "type": [ + 
"integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -103,7 +127,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -114,17 +142,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -134,34 +174,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -182,7 +246,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -193,17 +261,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -213,17 +293,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -269,7 +361,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -297,22 +393,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -322,7 +434,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -350,7 +466,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -383,7 +503,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -538,7 +662,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -598,7 +726,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -719,7 +851,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -777,7 +913,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -939,7 +1079,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/values.yaml index 28c9563974..2a8b11d47c 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-16-0/values.yaml @@ -125,9 +125,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: -- name: POD_NAME - fieldPath: metadata.name +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/schema.json index ef3cb091de..214a2baf6b 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/schema.json @@ -16,7 +16,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -31,27 +35,47 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of 
the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "nodeport of the corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -87,7 +111,11 @@ ] }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -108,7 +136,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -119,17 +151,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -139,34 +183,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -187,7 +255,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable 
before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -198,17 +270,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -218,17 +302,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -284,12 +380,20 @@ "title": "CORS" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -335,7 +439,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -363,22 +471,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is 
expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -388,7 +512,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -416,7 +544,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -449,7 +581,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -604,7 +740,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -664,7 +804,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -785,7 +929,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -843,7 +991,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1005,7 +1157,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + 
"integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/values.yaml index 5f31067eb3..d8f2cd026f 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-17-0/values.yaml @@ -125,9 +125,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: -- name: POD_NAME - fieldPath: metadata.name +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/reference-chart_4-18-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_4-18-0/schema.json index 2b9778bab0..da5cce59ea 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_4-18-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_4-18-0/schema.json @@ -15,7 +15,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,27 +34,47 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "nodeport of the 
corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -140,7 +164,11 @@ ] }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -161,7 +189,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -172,17 +204,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -192,34 +236,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -240,7 +308,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -251,17 +323,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -271,17 +355,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -337,12 +433,20 @@ "title": "CORS" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -388,7 +492,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -416,22 +524,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is 
expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -441,7 +565,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -469,7 +597,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -502,7 +634,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -664,7 +800,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -735,7 +875,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -795,7 +939,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -966,7 +1114,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -1024,7 +1176,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1130,7 +1286,11 @@ ] }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable service", "title": "Enabled" } @@ -1191,7 +1351,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-0-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_5-0-0/schema.json index 2b9778bab0..2a43e937cd 100644 --- a/scripts/devtron-reference-helm-charts/reference-chart_5-0-0/schema.json +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-0-0/schema.json @@ -15,7 +15,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -30,27 +34,47 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "nodeport of the corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -140,7 +164,11 @@ ] }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -161,7 +189,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -172,17 +204,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -192,34 +236,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", 
"title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -240,7 +308,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -251,17 +323,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, 
"periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -271,17 +355,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -337,12 +433,20 @@ "title": "CORS" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -388,7 +492,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -416,22 +524,38 @@ 
"title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -441,7 +565,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -469,7 +597,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -502,7 +634,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -664,7 +800,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": 
"Enabled" }, @@ -735,7 +875,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -795,7 +939,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -966,7 +1114,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -1024,7 +1176,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1130,7 +1286,11 @@ ] }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable service", "title": "Enabled" } @@ -1191,7 +1351,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/schema.json b/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/schema.json index 25935a51ef..da40c1753a 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/schema.json +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/schema.json @@ -16,7 +16,11 @@ "type": "object", 
"properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -31,27 +35,47 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "nodeport of the corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -84,7 +108,11 @@ "description": "used to provide mounts to the volume" }, "revisionHistoryLimit":{ - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "title": "RevisionHistoryLimit", "description": "revisionHistoryLimit is the maximum number of revisions that will bemaintained in the StatefulSet's revision history." 
}, @@ -381,7 +409,11 @@ ] }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -402,7 +434,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -413,17 +449,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -433,34 +481,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -481,7 +553,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -492,17 +568,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -512,17 +600,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -578,12 +678,20 @@ "title": "CORS" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -629,7 +737,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -657,22 +769,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": 
"Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -682,7 +810,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -710,7 +842,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -743,7 +879,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -892,7 +1032,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -963,7 +1107,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": 
"Enabled" }, @@ -1023,7 +1171,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -1144,7 +1296,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -1202,7 +1358,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1364,7 +1524,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/values.yaml index a0eaca9949..b58cc8d715 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_4-18-0/values.yaml @@ -137,9 +137,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: -- name: POD_NAME - fieldPath: metadata.name +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/schema.json b/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/schema.json index 25935a51ef..672df0a42e 100644 --- 
a/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/schema.json +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/schema.json @@ -16,7 +16,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -31,27 +35,47 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "nodePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "nodeport of the corresponding kubernetes service", "title": "Node Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -84,7 +108,11 @@ "description": "used to provide mounts to the volume" }, "revisionHistoryLimit":{ - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "title": "RevisionHistoryLimit", "description": "revisionHistoryLimit is the maximum number of revisions that will bemaintained in the StatefulSet's revision history." 
}, @@ -381,7 +409,11 @@ ] }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -402,7 +434,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -413,17 +449,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -433,34 +481,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -481,7 +553,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -492,17 +568,26 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": "integer", + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -512,17 +597,27 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -578,12 +673,20 @@ "title": "CORS" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify whether to create an ambassador mapping or not", "title": "Enabled" }, "weight": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to specify weight for canary ambassador mappings" }, "hostname": { @@ -629,7 +732,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -657,22 +764,37 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": 
"Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": "TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -682,7 +804,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -710,7 +836,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -743,7 +873,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -892,7 +1026,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -963,7 +1101,11 @@ "default": "" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" 
}, @@ -1023,7 +1165,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -1144,7 +1290,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -1202,7 +1352,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -1364,7 +1518,10 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/values.yaml index 4fe896feee..4a252b23e1 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/values.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-0-0/values.yaml @@ -140,9 +140,9 @@ server: image_tag: 1-95af053 image: "" -EnvVariablesFromFieldPath: -- name: POD_NAME - fieldPath: metadata.name +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name EnvVariables: - name: FLASK_ENV diff --git a/scripts/devtron-reference-helm-charts/workflow-chart_1-0-0/schema.json b/scripts/devtron-reference-helm-charts/workflow-chart_1-0-0/schema.json index 2e31a72972..6d970c183b 100644 --- 
a/scripts/devtron-reference-helm-charts/workflow-chart_1-0-0/schema.json +++ b/scripts/devtron-reference-helm-charts/workflow-chart_1-0-0/schema.json @@ -11,7 +11,11 @@ "type": "object", "properties": { "envoyPort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "envoy port for the container", "title": "Envoy Port" }, @@ -26,22 +30,38 @@ "title": "Name" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Port", "title": "port for the container" }, "servicePort": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port of the corresponding kubernetes service", "title": "Service Port" }, "supportStreaming": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "field to enable/disable timeout for high performance protocols like grpc", "title": "Support Streaming" }, "useHTTP2": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": " field for setting if envoy container can accept(or not) HTTP2 requests", "title": "Use HTTP2" } @@ -55,7 +75,11 @@ "title": "Environment Variables" }, "GracePeriod": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "time for which Kubernetes waits before terminating the pods", "title": "Grace Period" }, @@ -76,7 +100,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", "title": "Failure Threshold" }, @@ -87,17 +115,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": 
{ - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for liveness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for liveness", "title": "Period Seconds" }, "port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -107,34 +147,58 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } } }, "MaxSurge": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be created over the desired number of pods", "title": "Maximum Surge" }, "MaxUnavailable": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "maximum number of pods that can be unavailable during the update process", "title": "Maximum Unavailable" }, "MinReadySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", "title": "Minimum Ready Seconds" }, @@ -155,7 +219,11 @@ "title": "Command" }, "failureThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", "title": "Failure Threshold" }, @@ -166,17 +234,29 @@ "title": "HTTP headers" }, "initialDelaySeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to wait before a given container is checked for readiness", "title": "Initial Delay Seconds" }, "periodSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time to check a given container for readiness", "title": "Period Seconds" }, 
"port": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "port to access on the container", "title": "Port" }, @@ -186,17 +266,29 @@ "title": "Scheme" }, "successThreshold": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", "title": "Success Threshold" }, "tcp": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", "title": "TCP" }, "timeoutSeconds": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "defines the time for checking timeout", "title": "Timeout Seconds" } @@ -242,7 +334,11 @@ "title": "Arguments", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling aruguments", "title": "Enabled" }, @@ -270,22 +366,38 @@ "title": "Autoscaling", "properties": { "MaxReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Maximum number of replicas allowed for scaling", "title": "Maximum Replicas" }, "MinReplicas": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Minimum number of replicas allowed for scaling", "title": "Minimum Replicas" }, "TargetCPUUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target CPU utilization that is expected for a container", "title": 
"TargetCPUUtilizationPercentage" }, "TargetMemoryUtilizationPercentage": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "The target memory utilization that is expected for a container", "title": "TargetMemoryUtilizationPercentage" }, @@ -295,7 +407,11 @@ "title": "Behavior" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling autoscaling", "title": "Enabled" }, @@ -313,7 +429,11 @@ "title": "Command", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling commands" }, "value": { @@ -340,7 +460,11 @@ "title": "Db Migration Config", "properties": { "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used for enabling/disabling the config", "title": "Enabled" } @@ -443,7 +567,11 @@ "default": "nginx" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -503,7 +631,11 @@ "default": "nginx-internal" }, "enabled": { - "type": "boolean", + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "used to enable or disable ingress", "title": "Enabled" }, @@ -597,7 +729,11 @@ } }, "pauseForSecondsBeforeSwitchActive": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "tell how much to wait for given period of time before switch active the container", "title": "Pause For Seconds Before SwitchActive" }, @@ -634,7 +770,11 @@ "title": "Raw YAML" }, "replicaCount": { - "type": "integer", + "type": [ + "integer", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "count of Replicas of pod", "title": "REplica Count" }, @@ -776,7 +916,11 @@ "title": "Volumes" }, "waitForSecondsBeforeScalingDown": { - "type": "integer", + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", "description": "Wait for given period of time before scaling down the container", "title": "Wait For Seconds Before Scaling Down" } diff --git a/scripts/sql/181_remove_index_image_scan_deploy_info.down.sql b/scripts/sql/181_remove_index_image_scan_deploy_info.down.sql new file mode 100644 index 0000000000..2c9791ab66 --- /dev/null +++ b/scripts/sql/181_remove_index_image_scan_deploy_info.down.sql @@ -0,0 +1,2 @@ +DROP index image_scan_deploy_info_unique; +CREATE UNIQUE INDEX image_scan_deploy_info_unique ON public.image_scan_deploy_info USING btree (scan_object_meta_id, object_type); diff --git a/scripts/sql/181_remove_index_image_scan_deploy_info.up.sql b/scripts/sql/181_remove_index_image_scan_deploy_info.up.sql new file mode 100644 index 0000000000..d57d541bc3 --- /dev/null +++ b/scripts/sql/181_remove_index_image_scan_deploy_info.up.sql @@ -0,0 +1,2 @@ +DROP index image_scan_deploy_info_unique; +CREATE INDEX image_scan_deploy_info_unique ON public.image_scan_deploy_info USING btree (scan_object_meta_id, object_type); diff --git a/scripts/sql/177_custom_image_tag.down.sql b/scripts/sql/182_custom_image_tag.down.sql similarity index 100% rename from scripts/sql/177_custom_image_tag.down.sql rename to scripts/sql/182_custom_image_tag.down.sql diff --git a/scripts/sql/177_custom_image_tag.up.sql b/scripts/sql/182_custom_image_tag.up.sql similarity index 100% rename from scripts/sql/177_custom_image_tag.up.sql rename to scripts/sql/182_custom_image_tag.up.sql diff --git a/util/argo/ArgoUserService.go b/util/argo/ArgoUserService.go index 3e7e24b65e..7536b9132d 100644 --- a/util/argo/ArgoUserService.go +++ b/util/argo/ArgoUserService.go @@ -7,6 +7,7 @@ import ( 
"github.com/devtron-labs/authenticator/client" "github.com/devtron-labs/common-lib/utils/k8s" "github.com/devtron-labs/devtron/client/argocdServer" + "github.com/devtron-labs/devtron/client/argocdServer/connection" "github.com/devtron-labs/devtron/client/argocdServer/session" "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/pkg/cluster" @@ -48,12 +49,12 @@ type ArgoUserServiceImpl struct { devtronSecretConfig *util2.DevtronSecretConfig runTimeConfig *client.RuntimeConfig gitOpsRepository repository.GitOpsConfigRepository - argoCDConnectionManager argocdServer.ArgoCDConnectionManager + argoCDConnectionManager connection.ArgoCDConnectionManager versionService argocdServer.VersionService k8sUtil *k8s.K8sUtil } -func NewArgoUserServiceImpl(Logger *zap.SugaredLogger, clusterService cluster.ClusterService, devtronSecretConfig *util2.DevtronSecretConfig, runTimeConfig *client.RuntimeConfig, gitOpsRepository repository.GitOpsConfigRepository, argoCDConnectionManager argocdServer.ArgoCDConnectionManager, versionService argocdServer.VersionService, k8sUtil *k8s.K8sUtil) (*ArgoUserServiceImpl, error) { +func NewArgoUserServiceImpl(Logger *zap.SugaredLogger, clusterService cluster.ClusterService, devtronSecretConfig *util2.DevtronSecretConfig, runTimeConfig *client.RuntimeConfig, gitOpsRepository repository.GitOpsConfigRepository, argoCDConnectionManager connection.ArgoCDConnectionManager, versionService argocdServer.VersionService, k8sUtil *k8s.K8sUtil) (*ArgoUserServiceImpl, error) { argoUserServiceImpl := &ArgoUserServiceImpl{ logger: Logger, clusterService: clusterService, diff --git a/util/context-utils.go b/util/context-utils.go new file mode 100644 index 0000000000..c28b588f15 --- /dev/null +++ b/util/context-utils.go @@ -0,0 +1,23 @@ +package util + +import ( + "context" + "fmt" + "reflect" +) + +const 
IsSuperAdminFlag = "isSuperAdmin" + +func SetSuperAdminInContext(ctx context.Context, isSuperAdmin bool) context.Context { + ctx = context.WithValue(ctx, IsSuperAdminFlag, isSuperAdmin) + return ctx +} + +func GetIsSuperAdminFromContext(ctx context.Context) (bool, error) { + flag := ctx.Value(IsSuperAdminFlag) + + if flag != nil && reflect.TypeOf(flag).Kind() == reflect.Bool { + return flag.(bool), nil + } + return false, fmt.Errorf("context not valid, isSuperAdmin flag not set correctly %v", flag) +} diff --git a/util/rbac/EnforcerUtil.go b/util/rbac/EnforcerUtil.go index b1183d2862..693ad2eb28 100644 --- a/util/rbac/EnforcerUtil.go +++ b/util/rbac/EnforcerUtil.go @@ -30,7 +30,6 @@ import ( "github.com/devtron-labs/devtron/pkg/user/casbin" "github.com/go-pg/pg" "go.uber.org/zap" - "strings" ) type EnforcerUtil interface { @@ -115,7 +114,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsByEnvIdsAndAppId(envIds []int, appId for _, env := range envs { if _, ok := objects[env.Id]; !ok { - objects[env.Id] = fmt.Sprintf("%s/%s", strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(appName)) + objects[env.Id] = fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, appName) envObjectToName[objects[env.Id]] = env.Name } } @@ -131,7 +130,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsByAppIds(appIds []int) map[int]string } for _, item := range result { if _, ok := objects[item.Id]; !ok { - objects[item.Id] = fmt.Sprintf("%s/%s", strings.ToLower(item.Team.Name), strings.ToLower(item.AppName)) + objects[item.Id] = fmt.Sprintf("%s/%s", item.Team.Name, item.AppName) } } return objects @@ -140,17 +139,17 @@ func (impl EnforcerUtilImpl) GetRbacObjectsByAppIds(appIds []int) map[int]string func (impl EnforcerUtilImpl) GetAppRBACName(appName string) string { application, err := impl.appRepo.FindAppAndProjectByAppName(appName) if err != nil { - return fmt.Sprintf("%s/%s", "", strings.ToLower(appName)) + return fmt.Sprintf("%s/%s", "", appName) } - 
return fmt.Sprintf("%s/%s", strings.ToLower(application.Team.Name), strings.ToLower(appName)) + return fmt.Sprintf("%s/%s", application.Team.Name, appName) } func (impl EnforcerUtilImpl) GetProjectAdminRBACNameBYAppName(appName string) string { application, err := impl.appRepo.FindAppAndProjectByAppName(appName) if err != nil { - return fmt.Sprintf("%s/%s", "", strings.ToLower(appName)) + return fmt.Sprintf("%s/%s", "", appName) } - return fmt.Sprintf("%s/%s", strings.ToLower(application.Team.Name), "*") + return fmt.Sprintf("%s/%s", application.Team.Name, "*") } func (impl EnforcerUtilImpl) GetRbacObjectsForAllApps() map[int]string { @@ -161,7 +160,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsForAllApps() map[int]string { } for _, item := range result { if _, ok := objects[item.Id]; !ok { - objects[item.Id] = fmt.Sprintf("%s/%s", strings.ToLower(item.Team.Name), strings.ToLower(item.AppName)) + objects[item.Id] = fmt.Sprintf("%s/%s", item.Team.Name, item.AppName) } } return objects @@ -175,7 +174,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsForAllAppsWithTeamID(teamID int) map[ } for _, item := range result { if _, ok := objects[item.Id]; !ok { - objects[item.Id] = fmt.Sprintf("%s/%s", strings.ToLower(item.Team.Name), strings.ToLower(item.AppName)) + objects[item.Id] = fmt.Sprintf("%s/%s", item.Team.Name, item.AppName) } } return objects @@ -186,15 +185,15 @@ func (impl EnforcerUtilImpl) GetAppRBACNameByAppId(appId int) string { if err != nil { return fmt.Sprintf("%s/%s", "", "") } - return fmt.Sprintf("%s/%s", strings.ToLower(application.Team.Name), strings.ToLower(application.AppName)) + return fmt.Sprintf("%s/%s", application.Team.Name, application.AppName) } func (impl EnforcerUtilImpl) GetAppRBACByAppNameAndEnvId(appName string, envId int) string { env, err := impl.environmentRepository.FindById(envId) if err != nil { - return fmt.Sprintf("%s/%s", "", strings.ToLower(appName)) + return fmt.Sprintf("%s/%s", "", appName) } - return fmt.Sprintf("%s/%s", 
strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(appName)) + return fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, appName) } func (impl EnforcerUtilImpl) GetAppRBACByAppIdAndPipelineId(appId int, pipelineId int) string { @@ -204,13 +203,13 @@ func (impl EnforcerUtilImpl) GetAppRBACByAppIdAndPipelineId(appId int, pipelineI } pipeline, err := impl.pipelineRepository.FindById(pipelineId) if err != nil { - return fmt.Sprintf("%s/%s", "", strings.ToLower(application.AppName)) + return fmt.Sprintf("%s/%s", "", application.AppName) } env, err := impl.environmentRepository.FindById(pipeline.EnvironmentId) if err != nil { - return fmt.Sprintf("%s/%s", "", strings.ToLower(application.AppName)) + return fmt.Sprintf("%s/%s", "", application.AppName) } - return fmt.Sprintf("%s/%s", strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(application.AppName)) + return fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, application.AppName) } func (impl EnforcerUtilImpl) GetEnvRBACNameByAppId(appId int, envId int) string { @@ -221,9 +220,9 @@ func (impl EnforcerUtilImpl) GetEnvRBACNameByAppId(appId int, envId int) string var appName = application.AppName env, err := impl.environmentRepository.FindById(envId) if err != nil { - return fmt.Sprintf("%s/%s", "", strings.ToLower(appName)) + return fmt.Sprintf("%s/%s", "", appName) } - return fmt.Sprintf("%s/%s", strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(appName)) + return fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, appName) } func (impl EnforcerUtilImpl) GetTeamEnvRBACNameByAppId(appId int, envId int) string { @@ -235,9 +234,9 @@ func (impl EnforcerUtilImpl) GetTeamEnvRBACNameByAppId(appId int, envId int) str var teamName = application.Team.Name env, err := impl.environmentRepository.FindById(envId) if err != nil { - return fmt.Sprintf("%s/%s/%s", strings.ToLower(teamName), "", strings.ToLower(appName)) + return fmt.Sprintf("%s/%s/%s", teamName, "", appName) } - return fmt.Sprintf("%s/%s/%s", 
strings.ToLower(teamName), strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(appName)) + return fmt.Sprintf("%s/%s/%s", teamName, env.EnvironmentIdentifier, appName) } func (impl EnforcerUtilImpl) GetTeamRBACByCiPipelineId(pipelineId int) string { @@ -276,9 +275,9 @@ func (impl EnforcerUtilImpl) GetEnvRBACNameByCiPipelineIdAndEnvId(ciPipelineId i appName := application.AppName env, err := impl.environmentRepository.FindById(envId) if err != nil { - return fmt.Sprintf("%s/%s", "", strings.ToLower(appName)) + return fmt.Sprintf("%s/%s", "", appName) } - return fmt.Sprintf("%s/%s", strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(appName)) + return fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, appName) } func (impl EnforcerUtilImpl) GetEnvRBACNameByCdPipelineIdAndEnvId(cdPipelineId int) string { @@ -287,7 +286,7 @@ func (impl EnforcerUtilImpl) GetEnvRBACNameByCdPipelineIdAndEnvId(cdPipelineId i impl.logger.Error(err) return fmt.Sprintf("%s/%s", "", "") } - return fmt.Sprintf("%s/%s", strings.ToLower(pipeline.Environment.EnvironmentIdentifier), strings.ToLower(pipeline.App.AppName)) + return fmt.Sprintf("%s/%s", pipeline.Environment.EnvironmentIdentifier, pipeline.App.AppName) } func (impl EnforcerUtilImpl) GetTeamRbacObjectByCiPipelineId(ciPipelineId int) string { @@ -299,7 +298,7 @@ func (impl EnforcerUtilImpl) GetTeamRbacObjectByCiPipelineId(ciPipelineId int) s if err != nil { return fmt.Sprintf("%s/%s", "", "") } - return fmt.Sprintf("%s/%s", strings.ToLower(application.Team.Name), strings.ToLower(ciPipeline.App.AppName)) + return fmt.Sprintf("%s/%s", application.Team.Name, ciPipeline.App.AppName) } func (impl EnforcerUtilImpl) GetTeamAndEnvironmentRbacObjectByCDPipelineId(pipelineId int) (string, string) { @@ -313,8 +312,8 @@ func (impl EnforcerUtilImpl) GetTeamAndEnvironmentRbacObjectByCDPipelineId(pipel impl.logger.Errorw("error on fetching data for rbac object", "err", err) return "", "" } - teamRbac := fmt.Sprintf("%s/%s", 
strings.ToLower(application.Team.Name), strings.ToLower(pipeline.App.AppName)) - envRbac := fmt.Sprintf("%s/%s", strings.ToLower(pipeline.Environment.EnvironmentIdentifier), strings.ToLower(pipeline.App.AppName)) + teamRbac := fmt.Sprintf("%s/%s", application.Team.Name, pipeline.App.AppName) + envRbac := fmt.Sprintf("%s/%s", pipeline.Environment.EnvironmentIdentifier, pipeline.App.AppName) return teamRbac, envRbac } @@ -328,7 +327,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsForAllAppsAndEnvironments() (map[int] } for _, item := range apps { if _, ok := appObjects[item.Id]; !ok { - appObjects[item.Id] = fmt.Sprintf("%s/%s", strings.ToLower(item.Team.Name), strings.ToLower(item.AppName)) + appObjects[item.Id] = fmt.Sprintf("%s/%s", item.Team.Name, item.AppName) } } @@ -341,7 +340,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsForAllAppsAndEnvironments() (map[int] for _, app := range apps { key := fmt.Sprintf("%d-%d", env.Id, app.Id) if _, ok := envObjects[key]; !ok { - envObjects[key] = fmt.Sprintf("%s/%s", strings.ToLower(env.EnvironmentIdentifier), strings.ToLower(app.AppName)) + envObjects[key] = fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, app.AppName) } } } @@ -381,11 +380,11 @@ func (impl EnforcerUtilImpl) GetHelmObject(appId int, envId int) (string, string }*/ if environmentIdentifier2 == "" { - return fmt.Sprintf("%s/%s/%s", strings.ToLower(application.Team.Name), environmentIdentifier, strings.ToLower(application.AppName)), "" + return fmt.Sprintf("%s/%s/%s", application.Team.Name, environmentIdentifier, application.AppName), "" } - return fmt.Sprintf("%s/%s/%s", strings.ToLower(application.Team.Name), environmentIdentifier, strings.ToLower(application.AppName)), - fmt.Sprintf("%s/%s/%s", strings.ToLower(application.Team.Name), environmentIdentifier2, strings.ToLower(application.AppName)) + return fmt.Sprintf("%s/%s/%s", application.Team.Name, environmentIdentifier, application.AppName), + fmt.Sprintf("%s/%s/%s", application.Team.Name, 
environmentIdentifier2, application.AppName) } func (impl EnforcerUtilImpl) GetHelmObjectByAppNameAndEnvId(appName string, envId int) (string, string) { @@ -412,7 +411,7 @@ func (impl EnforcerUtilImpl) GetHelmObjectByAppNameAndEnvId(appName string, envI } } if environmentIdentifier2 == "" { - return fmt.Sprintf("%s/%s/%s", strings.ToLower(application.Team.Name), environmentIdentifier, strings.ToLower(application.AppName)), "" + return fmt.Sprintf("%s/%s/%s", application.Team.Name, environmentIdentifier, application.AppName), "" } //TODO - FIX required for futuristic permission for cluster__* all environment for migrated environment identifier only @@ -420,8 +419,8 @@ func (impl EnforcerUtilImpl) GetHelmObjectByAppNameAndEnvId(appName string, envI if !strings.HasPrefix(env.EnvironmentIdentifier, fmt.Sprintf("%s__", env.Cluster.ClusterName)) { environmentIdentifier = fmt.Sprintf("%s__%s", env.Cluster.ClusterName, env.EnvironmentIdentifier) }*/ - return fmt.Sprintf("%s/%s/%s", strings.ToLower(application.Team.Name), environmentIdentifier, strings.ToLower(application.AppName)), - fmt.Sprintf("%s/%s/%s", strings.ToLower(application.Team.Name), environmentIdentifier2, strings.ToLower(application.AppName)) + return fmt.Sprintf("%s/%s/%s", application.Team.Name, environmentIdentifier, application.AppName), + fmt.Sprintf("%s/%s/%s", application.Team.Name, environmentIdentifier2, application.AppName) } func (impl EnforcerUtilImpl) GetHelmObjectByProjectIdAndEnvId(teamId int, envId int) (string, string) { @@ -452,7 +451,7 @@ func (impl EnforcerUtilImpl) GetHelmObjectByProjectIdAndEnvId(teamId int, envId } if environmentIdentifier2 == "" { - return fmt.Sprintf("%s/%s/%s", strings.ToLower(team.Name), environmentIdentifier, "*"), "" + return fmt.Sprintf("%s/%s/%s", team.Name, environmentIdentifier, "*"), "" } //TODO - FIX required for futuristic permission for cluster__* all environment for migrated environment identifier only @@ -460,8 +459,8 @@ func (impl EnforcerUtilImpl) 
GetHelmObjectByProjectIdAndEnvId(teamId int, envId if !strings.HasPrefix(env.EnvironmentIdentifier, fmt.Sprintf("%s__", env.Cluster.ClusterName)) { environmentIdentifier = fmt.Sprintf("%s__%s", env.Cluster.ClusterName, env.EnvironmentIdentifier) }*/ - return fmt.Sprintf("%s/%s/%s", strings.ToLower(team.Name), environmentIdentifier, "*"), - fmt.Sprintf("%s/%s/%s", strings.ToLower(team.Name), environmentIdentifier2, "*") + return fmt.Sprintf("%s/%s/%s", team.Name, environmentIdentifier, "*"), + fmt.Sprintf("%s/%s/%s", team.Name, environmentIdentifier2, "*") } func (impl EnforcerUtilImpl) GetAppRBACNameByTeamIdAndAppId(teamId int, appId int) string { @@ -475,7 +474,7 @@ func (impl EnforcerUtilImpl) GetAppRBACNameByTeamIdAndAppId(teamId int, appId in impl.logger.Errorw("error on fetching data for rbac object", "err", err) return fmt.Sprintf("%s/%s", "", "") } - return fmt.Sprintf("%s/%s", strings.ToLower(team.Name), strings.ToLower(application.AppName)) + return fmt.Sprintf("%s/%s", team.Name, application.AppName) } func (impl EnforcerUtilImpl) GetRBACNameForClusterEntity(clusterName string, resourceIdentifier k8s.ResourceIdentifier) (resourceName, objectName string) { @@ -504,7 +503,7 @@ func (impl EnforcerUtilImpl) GetAppObjectByCiPipelineIds(ciPipelineIds []int) ma } for _, pipeline := range models { if _, ok := objects[pipeline.Id]; !ok { - appObject := fmt.Sprintf("%s/%s", strings.ToLower(pipeline.App.Team.Name), strings.ToLower(pipeline.App.AppName)) + appObject := fmt.Sprintf("%s/%s", pipeline.App.Team.Name, pipeline.App.AppName) objects[pipeline.Id] = appObject } } @@ -520,8 +519,8 @@ func (impl EnforcerUtilImpl) GetAppAndEnvObjectByPipelineIds(cdPipelineIds []int } for _, pipeline := range models { if _, ok := objects[pipeline.Id]; !ok { - appObject := fmt.Sprintf("%s/%s", strings.ToLower(pipeline.App.Team.Name), strings.ToLower(pipeline.App.AppName)) - envObject := fmt.Sprintf("%s/%s", strings.ToLower(pipeline.Environment.EnvironmentIdentifier), 
strings.ToLower(pipeline.App.AppName)) + appObject := fmt.Sprintf("%s/%s", pipeline.App.Team.Name, pipeline.App.AppName) + envObject := fmt.Sprintf("%s/%s", pipeline.Environment.EnvironmentIdentifier, pipeline.App.AppName) objects[pipeline.Id] = []string{appObject, envObject} } } @@ -536,7 +535,7 @@ func (impl EnforcerUtilImpl) GetRbacObjectsForAllAppsWithMatchingAppName(appName } for _, item := range result { if _, ok := objects[item.Id]; !ok { - objects[item.Id] = fmt.Sprintf("%s/%s", strings.ToLower(item.Team.Name), strings.ToLower(item.AppName)) + objects[item.Id] = fmt.Sprintf("%s/%s", item.Team.Name, item.AppName) } } return objects @@ -559,8 +558,8 @@ func (impl EnforcerUtilImpl) GetAppAndEnvObjectByPipeline(cdPipelines []*bean.CD } for _, pipeline := range cdPipelines { if _, ok := objects[pipeline.Id]; !ok { - appObject := fmt.Sprintf("%s/%s", strings.ToLower(teamMap[pipeline.TeamId]), strings.ToLower(pipeline.AppName)) - envObject := fmt.Sprintf("%s/%s", strings.ToLower(pipeline.EnvironmentIdentifier), strings.ToLower(pipeline.AppName)) + appObject := fmt.Sprintf("%s/%s", teamMap[pipeline.TeamId], pipeline.AppName) + envObject := fmt.Sprintf("%s/%s", pipeline.EnvironmentIdentifier, pipeline.AppName) objects[pipeline.Id] = []string{appObject, envObject} } } @@ -586,8 +585,8 @@ func (impl EnforcerUtilImpl) GetAppAndEnvObjectByDbPipeline(cdPipelines []*pipel } for _, pipeline := range cdPipelines { if _, ok := objects[pipeline.Id]; !ok { - appObject := fmt.Sprintf("%s/%s", strings.ToLower(teamMap[pipeline.App.TeamId]), strings.ToLower(pipeline.App.AppName)) - envObject := fmt.Sprintf("%s/%s", strings.ToLower(pipeline.Environment.EnvironmentIdentifier), strings.ToLower(pipeline.App.AppName)) + appObject := fmt.Sprintf("%s/%s", teamMap[pipeline.App.TeamId], pipeline.App.AppName) + envObject := fmt.Sprintf("%s/%s", pipeline.Environment.EnvironmentIdentifier, pipeline.App.AppName) objects[pipeline.Id] = []string{appObject, envObject} } } @@ -601,11 +600,11 @@ 
func (impl EnforcerUtilImpl) GetAllActiveTeamNames() ([]string, error) { return nil, err } for i, teamName := range teamNames { - teamNames[i] = strings.ToLower(teamName) + teamNames[i] = teamName } return teamNames, nil } func (impl EnforcerUtilImpl) GetAppRBACNameByAppAndProjectName(projectName, appName string) string { - return fmt.Sprintf("%s/%s", strings.ToLower(projectName), strings.ToLower(appName)) + return fmt.Sprintf("%s/%s", projectName, appName) } diff --git a/util/rbac/EnforcerUtilHelm.go b/util/rbac/EnforcerUtilHelm.go index ea7bdd8565..099c14a9e1 100644 --- a/util/rbac/EnforcerUtilHelm.go +++ b/util/rbac/EnforcerUtilHelm.go @@ -8,7 +8,6 @@ import ( "github.com/devtron-labs/devtron/pkg/team" "github.com/go-pg/pg" "go.uber.org/zap" - "strings" ) type EnforcerUtilHelm interface { @@ -48,7 +47,7 @@ func (impl EnforcerUtilHelmImpl) GetHelmObjectByClusterId(clusterId int, namespa if err != nil { return fmt.Sprintf("%s/%s/%s", "", "", "") } - return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, strings.ToLower(appName)) + return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, appName) } func (impl EnforcerUtilHelmImpl) GetHelmObjectByTeamIdAndClusterId(teamId int, clusterId int, namespace string, appName string) string { @@ -60,7 +59,7 @@ func (impl EnforcerUtilHelmImpl) GetHelmObjectByTeamIdAndClusterId(teamId int, c if err != nil { return fmt.Sprintf("%s/%s/%s", "", "", "") } - return fmt.Sprintf("%s/%s__%s/%s", teamObj.Name, cluster.ClusterName, namespace, strings.ToLower(appName)) + return fmt.Sprintf("%s/%s__%s/%s", teamObj.Name, cluster.ClusterName, namespace, appName) } func (impl EnforcerUtilHelmImpl) GetHelmObjectByClusterIdNamespaceAndAppName(clusterId int, namespace string, appName string) (string, string) { @@ -89,27 +88,27 @@ func (impl EnforcerUtilHelmImpl) GetHelmObjectByClusterIdNamespaceAndAppName(clu if app.TeamId == 0 { // case if 
project is not assigned to cli app - return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, strings.ToLower(appName)), "" + return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, appName), "" } else { // case if project is assigned - return fmt.Sprintf("%s/%s__%s/%s", app.Team.Name, cluster.ClusterName, namespace, strings.ToLower(appName)), "" + return fmt.Sprintf("%s/%s__%s/%s", app.Team.Name, cluster.ClusterName, namespace, appName), "" } } if installedApp.App.TeamId == 0 { // for EA apps which have no project assigned to them - return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, strings.ToLower(appName)), - fmt.Sprintf("%s/%s/%s", team.UNASSIGNED_PROJECT, installedApp.Environment.EnvironmentIdentifier, strings.ToLower(appName)) + return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, appName), + fmt.Sprintf("%s/%s/%s", team.UNASSIGNED_PROJECT, installedApp.Environment.EnvironmentIdentifier, appName) } else { if installedApp.EnvironmentId == 0 { // for apps in EA mode, initally env can be 0. 
- return fmt.Sprintf("%s/%s__%s/%s", installedApp.App.Team.Name, cluster.ClusterName, namespace, strings.ToLower(appName)), "" + return fmt.Sprintf("%s/%s__%s/%s", installedApp.App.Team.Name, cluster.ClusterName, namespace, appName), "" } // for apps which are assigned to a project and have env ID - rbacOne := fmt.Sprintf("%s/%s/%s", installedApp.App.Team.Name, installedApp.Environment.EnvironmentIdentifier, strings.ToLower(appName)) - rbacTwo := fmt.Sprintf("%s/%s__%s/%s", installedApp.App.Team.Name, cluster.ClusterName, namespace, strings.ToLower(appName)) + rbacOne := fmt.Sprintf("%s/%s/%s", installedApp.App.Team.Name, installedApp.Environment.EnvironmentIdentifier, appName) + rbacTwo := fmt.Sprintf("%s/%s__%s/%s", installedApp.App.Team.Name, cluster.ClusterName, namespace, appName) if installedApp.Environment.IsVirtualEnvironment { return rbacOne, "" } @@ -125,7 +124,7 @@ func (impl EnforcerUtilHelmImpl) GetAppRBACNameByInstalledAppId(installedAppVers impl.logger.Errorw("error in fetching installed app version data", "err", err) return fmt.Sprintf("%s/%s/%s", "", "", ""), fmt.Sprintf("%s/%s/%s", "", "", "") } - rbacOne := fmt.Sprintf("%s/%s/%s", InstalledApp.App.Team.Name, InstalledApp.Environment.EnvironmentIdentifier, strings.ToLower(InstalledApp.App.AppName)) + rbacOne := fmt.Sprintf("%s/%s/%s", InstalledApp.App.Team.Name, InstalledApp.Environment.EnvironmentIdentifier, InstalledApp.App.AppName) if InstalledApp.Environment.IsVirtualEnvironment { return rbacOne, "" @@ -134,11 +133,10 @@ func (impl EnforcerUtilHelmImpl) GetAppRBACNameByInstalledAppId(installedAppVers var rbacTwo string if !InstalledApp.Environment.IsVirtualEnvironment { if InstalledApp.Environment.EnvironmentIdentifier != InstalledApp.Environment.Cluster.ClusterName+"__"+InstalledApp.Environment.Namespace { - rbacTwo = fmt.Sprintf("%s/%s/%s", InstalledApp.App.Team.Name, InstalledApp.Environment.Cluster.ClusterName+"__"+InstalledApp.Environment.Namespace, 
strings.ToLower(InstalledApp.App.AppName)) + rbacTwo = fmt.Sprintf("%s/%s/%s", InstalledApp.App.Team.Name, InstalledApp.Environment.Cluster.ClusterName+"__"+InstalledApp.Environment.Namespace, InstalledApp.App.AppName) return rbacOne, rbacTwo } } return rbacOne, "" - } diff --git a/wire_gen.go b/wire_gen.go index ab75504855..a095cb7752 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -44,7 +44,8 @@ import ( "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/client/argocdServer/application" "github.com/devtron-labs/devtron/client/argocdServer/cluster" - repository8 "github.com/devtron-labs/devtron/client/argocdServer/repository" + "github.com/devtron-labs/devtron/client/argocdServer/connection" + repository9 "github.com/devtron-labs/devtron/client/argocdServer/repository" "github.com/devtron-labs/devtron/client/cron" "github.com/devtron-labs/devtron/client/dashboard" "github.com/devtron-labs/devtron/client/events" @@ -67,7 +68,6 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/security" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/internal/util/ArgoUtil" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/apiToken" app2 "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/app/status" @@ -100,7 +100,7 @@ import ( delete2 "github.com/devtron-labs/devtron/pkg/delete" "github.com/devtron-labs/devtron/pkg/deploymentGroup" "github.com/devtron-labs/devtron/pkg/devtronResource" - repository9 "github.com/devtron-labs/devtron/pkg/devtronResource/repository" + repository8 
"github.com/devtron-labs/devtron/pkg/devtronResource/repository" "github.com/devtron-labs/devtron/pkg/dockerRegistry" "github.com/devtron-labs/devtron/pkg/externalLink" "github.com/devtron-labs/devtron/pkg/generateManifest" @@ -207,15 +207,15 @@ func InitializeApp() (*App, error) { return nil, err } k8sUtil := k8s.NewK8sUtil(sugaredLogger, runtimeConfig) - argocdServerConfig, err := argocdServer.GetConfig() + connectionConfig, err := connection.GetConfig() if err != nil { return nil, err } - settingsManager, err := argocdServer.SettingsManager(argocdServerConfig) + settingsManager, err := connection.SettingsManager(connectionConfig) if err != nil { return nil, err } - argoCDConnectionManagerImpl, err := argocdServer.NewArgoCDConnectionManagerImpl(sugaredLogger, settingsManager, moduleRepositoryImpl) + argoCDConnectionManagerImpl, err := connection.NewArgoCDConnectionManagerImpl(sugaredLogger, settingsManager, moduleRepositoryImpl) if err != nil { return nil, err } @@ -329,28 +329,12 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - deploymentTemplateHistoryServiceImpl := history.NewDeploymentTemplateHistoryServiceImpl(sugaredLogger, deploymentTemplateHistoryRepositoryImpl, pipelineRepositoryImpl, chartRepositoryImpl, chartRefRepositoryImpl, envLevelAppMetricsRepositoryImpl, appLevelMetricsRepositoryImpl, userServiceImpl, cdWorkflowRepositoryImpl, variableSnapshotHistoryServiceImpl, variableTemplateParserImpl) - chartWorkingDir := _wireChartWorkingDirValue - globalEnvVariables, err := util3.GetGlobalEnvVariables() - if err != nil { - return nil, err - } - chartTemplateServiceImpl := util.NewChartTemplateServiceImpl(sugaredLogger, chartWorkingDir, httpClient, gitFactory, globalEnvVariables, gitOpsConfigRepositoryImpl, userRepositoryImpl, chartRepositoryImpl) - refChartDir := _wireRefChartDirValue - chartRepoRepositoryImpl := chartRepoRepository.NewChartRepoRepositoryImpl(db) 
- defaultChart := _wireDefaultChartValue - utilMergeUtil := util.MergeUtil{ - Logger: sugaredLogger, - } - repositoryServiceClientImpl := repository8.NewServiceClientImpl(sugaredLogger, argoCDConnectionManagerImpl) - variableEntityMappingRepositoryImpl := repository7.NewVariableEntityMappingRepository(sugaredLogger, db) - variableEntityMappingServiceImpl := variables.NewVariableEntityMappingServiceImpl(variableEntityMappingRepositoryImpl, sugaredLogger) scopedVariableRepositoryImpl := repository7.NewScopedVariableRepository(db, sugaredLogger) qualifiersMappingRepositoryImpl, err := resourceQualifiers.NewQualifiersMappingRepositoryImpl(db, sugaredLogger) if err != nil { return nil, err } - devtronResourceSearchableKeyRepositoryImpl := repository9.NewDevtronResourceSearchableKeyRepositoryImpl(sugaredLogger, db) + devtronResourceSearchableKeyRepositoryImpl := repository8.NewDevtronResourceSearchableKeyRepositoryImpl(sugaredLogger, db) devtronResourceSearchableKeyServiceImpl, err := devtronResource.NewDevtronResourceSearchableKeyServiceImpl(sugaredLogger, devtronResourceSearchableKeyRepositoryImpl) if err != nil { return nil, err @@ -363,6 +347,22 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } + deploymentTemplateHistoryServiceImpl := history.NewDeploymentTemplateHistoryServiceImpl(sugaredLogger, deploymentTemplateHistoryRepositoryImpl, pipelineRepositoryImpl, chartRepositoryImpl, chartRefRepositoryImpl, envLevelAppMetricsRepositoryImpl, appLevelMetricsRepositoryImpl, userServiceImpl, cdWorkflowRepositoryImpl, variableSnapshotHistoryServiceImpl, variableTemplateParserImpl, scopedVariableServiceImpl) + chartWorkingDir := _wireChartWorkingDirValue + globalEnvVariables, err := util3.GetGlobalEnvVariables() + if err != nil { + return nil, err + } + chartTemplateServiceImpl := util.NewChartTemplateServiceImpl(sugaredLogger, chartWorkingDir, httpClient, gitFactory, globalEnvVariables, gitOpsConfigRepositoryImpl, userRepositoryImpl, 
chartRepositoryImpl) + refChartDir := _wireRefChartDirValue + chartRepoRepositoryImpl := chartRepoRepository.NewChartRepoRepositoryImpl(db) + defaultChart := _wireDefaultChartValue + utilMergeUtil := util.MergeUtil{ + Logger: sugaredLogger, + } + repositoryServiceClientImpl := repository9.NewServiceClientImpl(sugaredLogger, argoCDConnectionManagerImpl) + variableEntityMappingRepositoryImpl := repository7.NewVariableEntityMappingRepository(sugaredLogger, db) + variableEntityMappingServiceImpl := variables.NewVariableEntityMappingServiceImpl(variableEntityMappingRepositoryImpl, sugaredLogger) chartServiceImpl := chart.NewChartServiceImpl(chartRepositoryImpl, sugaredLogger, chartTemplateServiceImpl, chartRepoRepositoryImpl, appRepositoryImpl, refChartDir, defaultChart, utilMergeUtil, repositoryServiceClientImpl, chartRefRepositoryImpl, envConfigOverrideRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, environmentRepositoryImpl, pipelineRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, httpClient, deploymentTemplateHistoryServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, scopedVariableServiceImpl) devtronSecretConfig, err := util3.GetDevtronSecretName() if err != nil { @@ -393,23 +393,24 @@ func InitializeApp() (*App, error) { return nil, err } appStatusServiceImpl := appStatus2.NewAppStatusServiceImpl(appStatusRepositoryImpl, sugaredLogger, enforcerImpl, enforcerUtilImpl) + chartGroupDeploymentRepositoryImpl := repository3.NewChartGroupDeploymentRepositoryImpl(db, sugaredLogger) clusterInstalledAppsRepositoryImpl := repository3.NewClusterInstalledAppsRepositoryImpl(db, sugaredLogger) refChartProxyDir := _wireRefChartProxyDirValue appStoreDeploymentCommonServiceImpl := appStoreDeploymentCommon.NewAppStoreDeploymentCommonServiceImpl(sugaredLogger, installedAppRepositoryImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, chartTemplateServiceImpl, refChartProxyDir, 
gitFactory, gitOpsConfigRepositoryImpl) ociRegistryConfigRepositoryImpl := repository5.NewOCIRegistryConfigRepositoryImpl(db) appStoreDeploymentHelmServiceImpl := appStoreDeploymentTool.NewAppStoreDeploymentHelmServiceImpl(sugaredLogger, helmAppServiceImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, helmAppClientImpl, installedAppRepositoryImpl, appStoreDeploymentCommonServiceImpl, ociRegistryConfigRepositoryImpl) - appStoreDeploymentFullModeServiceImpl := appStoreDeploymentFullMode.NewAppStoreDeploymentFullModeServiceImpl(sugaredLogger, chartTemplateServiceImpl, refChartProxyDir, repositoryServiceClientImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, applicationServiceClientImpl, argoK8sClientImpl, gitFactory, acdAuthConfig, globalEnvVariables, installedAppRepositoryImpl, tokenCache, argoUserServiceImpl, gitOpsConfigRepositoryImpl, pipelineStatusTimelineServiceImpl, appStoreDeploymentCommonServiceImpl) - chartGroupDeploymentRepositoryImpl := repository3.NewChartGroupDeploymentRepositoryImpl(db, sugaredLogger) - appStoreDeploymentArgoCdServiceImpl := appStoreDeploymentGitopsTool.NewAppStoreDeploymentArgoCdServiceImpl(sugaredLogger, appStoreDeploymentFullModeServiceImpl, applicationServiceClientImpl, chartGroupDeploymentRepositoryImpl, installedAppRepositoryImpl, installedAppVersionHistoryRepositoryImpl, chartTemplateServiceImpl, gitFactory, argoUserServiceImpl, appStoreDeploymentCommonServiceImpl, helmAppServiceImpl, gitOpsConfigRepositoryImpl, appStatusServiceImpl, pipelineStatusTimelineServiceImpl, userServiceImpl, pipelineStatusTimelineRepositoryImpl, appStoreApplicationVersionRepositoryImpl) + argoClientWrapperServiceImpl := argocdServer.NewArgoClientWrapperServiceImpl(sugaredLogger, applicationServiceClientImpl) + appStoreDeploymentFullModeServiceImpl := appStoreDeploymentFullMode.NewAppStoreDeploymentFullModeServiceImpl(sugaredLogger, chartTemplateServiceImpl, refChartProxyDir, repositoryServiceClientImpl, 
appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, applicationServiceClientImpl, argoK8sClientImpl, gitFactory, acdAuthConfig, globalEnvVariables, installedAppRepositoryImpl, tokenCache, argoUserServiceImpl, gitOpsConfigRepositoryImpl, pipelineStatusTimelineServiceImpl, appStoreDeploymentCommonServiceImpl, argoClientWrapperServiceImpl) + appStoreDeploymentArgoCdServiceImpl := appStoreDeploymentGitopsTool.NewAppStoreDeploymentArgoCdServiceImpl(sugaredLogger, appStoreDeploymentFullModeServiceImpl, applicationServiceClientImpl, chartGroupDeploymentRepositoryImpl, installedAppRepositoryImpl, installedAppVersionHistoryRepositoryImpl, chartTemplateServiceImpl, gitFactory, argoUserServiceImpl, appStoreDeploymentCommonServiceImpl, helmAppServiceImpl, gitOpsConfigRepositoryImpl, appStatusServiceImpl, pipelineStatusTimelineServiceImpl, userServiceImpl, pipelineStatusTimelineRepositoryImpl, appStoreApplicationVersionRepositoryImpl, argoClientWrapperServiceImpl) deploymentServiceTypeConfig, err := service.GetDeploymentServiceTypeConfig() if err != nil { return nil, err } - appStoreDeploymentServiceImpl := service.NewAppStoreDeploymentServiceImpl(sugaredLogger, installedAppRepositoryImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, clusterInstalledAppsRepositoryImpl, appRepositoryImpl, appStoreDeploymentHelmServiceImpl, appStoreDeploymentArgoCdServiceImpl, environmentServiceImpl, clusterServiceImplExtended, helmAppServiceImpl, appStoreDeploymentCommonServiceImpl, globalEnvVariables, installedAppVersionHistoryRepositoryImpl, gitOpsConfigRepositoryImpl, attributesServiceImpl, deploymentServiceTypeConfig, chartTemplateServiceImpl, pubSubClientServiceImpl) + appStoreDeploymentServiceImpl := service.NewAppStoreDeploymentServiceImpl(sugaredLogger, installedAppRepositoryImpl, chartGroupDeploymentRepositoryImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, clusterInstalledAppsRepositoryImpl, appRepositoryImpl, 
appStoreDeploymentHelmServiceImpl, appStoreDeploymentArgoCdServiceImpl, environmentServiceImpl, clusterServiceImplExtended, helmAppServiceImpl, appStoreDeploymentCommonServiceImpl, globalEnvVariables, installedAppVersionHistoryRepositoryImpl, gitOpsConfigRepositoryImpl, attributesServiceImpl, deploymentServiceTypeConfig, chartTemplateServiceImpl, pubSubClientServiceImpl) k8sCommonServiceImpl := k8s2.NewK8sCommonServiceImpl(sugaredLogger, k8sUtil, clusterServiceImplExtended) manifestPushConfigRepositoryImpl := repository11.NewManifestPushConfigRepository(sugaredLogger, db) gitOpsManifestPushServiceImpl := app2.NewGitOpsManifestPushServiceImpl(sugaredLogger, chartTemplateServiceImpl, chartServiceImpl, gitOpsConfigRepositoryImpl, gitFactory, pipelineStatusTimelineServiceImpl) - appServiceImpl := app2.NewAppService(envConfigOverrideRepositoryImpl, pipelineOverrideRepositoryImpl, mergeUtil, sugaredLogger, ciArtifactRepositoryImpl, pipelineRepositoryImpl, dbMigrationConfigRepositoryImpl, eventRESTClientImpl, eventSimpleFactoryImpl, applicationServiceClientImpl, tokenCache, acdAuthConfig, enforcerImpl, enforcerUtilImpl, userServiceImpl, appListingRepositoryImpl, appRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, chartRepositoryImpl, ciPipelineMaterialRepositoryImpl, cdWorkflowRepositoryImpl, commonServiceImpl, imageScanDeployInfoRepositoryImpl, imageScanHistoryRepositoryImpl, argoK8sClientImpl, gitFactory, pipelineStrategyHistoryServiceImpl, configMapHistoryServiceImpl, deploymentTemplateHistoryServiceImpl, chartTemplateServiceImpl, refChartDir, chartRefRepositoryImpl, chartServiceImpl, helmAppClientImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, appCrudOperationServiceImpl, configMapHistoryRepositoryImpl, pipelineStrategyHistoryRepositoryImpl, deploymentTemplateHistoryRepositoryImpl, dockerRegistryIpsConfigServiceImpl, 
pipelineStatusTimelineResourcesServiceImpl, pipelineStatusSyncDetailServiceImpl, pipelineStatusTimelineServiceImpl, appServiceConfig, gitOpsConfigRepositoryImpl, appStatusServiceImpl, installedAppRepositoryImpl, appStoreDeploymentServiceImpl, k8sCommonServiceImpl, installedAppVersionHistoryRepositoryImpl, globalEnvVariables, helmAppServiceImpl, manifestPushConfigRepositoryImpl, gitOpsManifestPushServiceImpl, variableSnapshotHistoryServiceImpl, scopedVariableServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl) + appServiceImpl := app2.NewAppService(envConfigOverrideRepositoryImpl, pipelineOverrideRepositoryImpl, mergeUtil, sugaredLogger, ciArtifactRepositoryImpl, pipelineRepositoryImpl, dbMigrationConfigRepositoryImpl, eventRESTClientImpl, eventSimpleFactoryImpl, applicationServiceClientImpl, tokenCache, acdAuthConfig, enforcerImpl, enforcerUtilImpl, userServiceImpl, appListingRepositoryImpl, appRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, chartRepositoryImpl, ciPipelineMaterialRepositoryImpl, cdWorkflowRepositoryImpl, commonServiceImpl, imageScanDeployInfoRepositoryImpl, imageScanHistoryRepositoryImpl, argoK8sClientImpl, gitFactory, pipelineStrategyHistoryServiceImpl, configMapHistoryServiceImpl, deploymentTemplateHistoryServiceImpl, chartTemplateServiceImpl, refChartDir, chartRefRepositoryImpl, chartServiceImpl, helmAppClientImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, appCrudOperationServiceImpl, configMapHistoryRepositoryImpl, pipelineStrategyHistoryRepositoryImpl, deploymentTemplateHistoryRepositoryImpl, dockerRegistryIpsConfigServiceImpl, pipelineStatusTimelineResourcesServiceImpl, pipelineStatusSyncDetailServiceImpl, pipelineStatusTimelineServiceImpl, appServiceConfig, gitOpsConfigRepositoryImpl, appStatusServiceImpl, installedAppRepositoryImpl, appStoreDeploymentServiceImpl, k8sCommonServiceImpl, 
installedAppVersionHistoryRepositoryImpl, globalEnvVariables, helmAppServiceImpl, manifestPushConfigRepositoryImpl, gitOpsManifestPushServiceImpl, variableSnapshotHistoryServiceImpl, scopedVariableServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, argoClientWrapperServiceImpl) validate, err := util.IntValidator() if err != nil { return nil, err @@ -445,15 +446,10 @@ func InitializeApp() (*App, error) { pipelineStageRepositoryImpl := repository11.NewPipelineStageRepository(sugaredLogger, db) globalPluginRepositoryImpl := repository12.NewGlobalPluginRepository(sugaredLogger, db) pipelineStageServiceImpl := pipeline.NewPipelineStageService(sugaredLogger, pipelineStageRepositoryImpl, globalPluginRepositoryImpl, pipelineRepositoryImpl, scopedVariableServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl) - globalPluginServiceImpl := plugin.NewGlobalPluginService(sugaredLogger, globalPluginRepositoryImpl) - dockerRegistryConfigImpl := pipeline.NewDockerRegistryConfigImpl(sugaredLogger, helmAppServiceImpl, dockerArtifactStoreRepositoryImpl, dockerRegistryIpsConfigRepositoryImpl, ociRegistryConfigRepositoryImpl) - imageTagRepositoryImpl := repository.NewImageTagRepository(db, sugaredLogger) - customTagServiceImpl := pkg.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) - pluginInputVariableParserImpl := pipeline.NewPluginInputVariableParserImpl(sugaredLogger, dockerRegistryConfigImpl, customTagServiceImpl) - workflowDagExecutorImpl := pipeline.NewWorkflowDagExecutorImpl(sugaredLogger, pipelineRepositoryImpl, cdWorkflowRepositoryImpl, pubSubClientServiceImpl, appServiceImpl, workflowServiceImpl, ciArtifactRepositoryImpl, ciPipelineRepositoryImpl, materialRepositoryImpl, pipelineOverrideRepositoryImpl, userServiceImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, enforcerImpl, enforcerUtilImpl, tokenCache, acdAuthConfig, eventSimpleFactoryImpl, eventRESTClientImpl, cvePolicyRepositoryImpl, 
imageScanResultRepositoryImpl, appWorkflowRepositoryImpl, prePostCdScriptHistoryServiceImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, pipelineStatusTimelineServiceImpl, ciTemplateRepositoryImpl, ciWorkflowRepositoryImpl, appLabelRepositoryImpl, clientImpl, pipelineStageServiceImpl, k8sCommonServiceImpl, variableSnapshotHistoryServiceImpl, globalPluginServiceImpl, pluginInputVariableParserImpl) + workflowDagExecutorImpl := pipeline.NewWorkflowDagExecutorImpl(sugaredLogger, pipelineRepositoryImpl, cdWorkflowRepositoryImpl, pubSubClientServiceImpl, appServiceImpl, workflowServiceImpl, ciArtifactRepositoryImpl, ciPipelineRepositoryImpl, materialRepositoryImpl, pipelineOverrideRepositoryImpl, userServiceImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, enforcerImpl, enforcerUtilImpl, tokenCache, acdAuthConfig, eventSimpleFactoryImpl, eventRESTClientImpl, cvePolicyRepositoryImpl, imageScanResultRepositoryImpl, appWorkflowRepositoryImpl, prePostCdScriptHistoryServiceImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, pipelineStatusTimelineServiceImpl, ciTemplateRepositoryImpl, ciWorkflowRepositoryImpl, appLabelRepositoryImpl, clientImpl, pipelineStageServiceImpl, k8sCommonServiceImpl, variableSnapshotHistoryServiceImpl, deploymentTemplateHistoryServiceImpl, configMapHistoryServiceImpl, pipelineStrategyHistoryServiceImpl, manifestPushConfigRepositoryImpl, gitOpsManifestPushServiceImpl, ciPipelineMaterialRepositoryImpl, imageScanHistoryRepositoryImpl, imageScanDeployInfoRepositoryImpl, appCrudOperationServiceImpl, pipelineConfigRepositoryImpl, dockerRegistryIpsConfigServiceImpl, chartRepositoryImpl, chartTemplateServiceImpl, pipelineStrategyHistoryRepositoryImpl, appRepositoryImpl, deploymentTemplateHistoryRepositoryImpl, argoK8sClientImpl, configMapRepositoryImpl, configMapHistoryRepositoryImpl, refChartDir, helmAppServiceImpl, helmAppClientImpl, chartRefRepositoryImpl, envConfigOverrideRepositoryImpl, 
appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, dbMigrationConfigRepositoryImpl, mergeUtil, gitOpsConfigRepositoryImpl, gitFactory, applicationServiceClientImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, argoClientWrapperServiceImpl, scopedVariableServiceImpl) deploymentGroupAppRepositoryImpl := repository.NewDeploymentGroupAppRepositoryImpl(sugaredLogger, db) deploymentGroupServiceImpl := deploymentGroup.NewDeploymentGroupServiceImpl(appRepositoryImpl, sugaredLogger, pipelineRepositoryImpl, ciPipelineRepositoryImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, deploymentGroupAppRepositoryImpl, ciArtifactRepositoryImpl, appWorkflowRepositoryImpl, workflowDagExecutorImpl) - deploymentConfigServiceImpl := pipeline.NewDeploymentConfigServiceImpl(sugaredLogger, envConfigOverrideRepositoryImpl, chartRepositoryImpl, pipelineRepositoryImpl, envLevelAppMetricsRepositoryImpl, appLevelMetricsRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, configMapHistoryServiceImpl, chartRefRepositoryImpl, variableEntityMappingServiceImpl, scopedVariableServiceImpl) + deploymentConfigServiceImpl := pipeline.NewDeploymentConfigServiceImpl(sugaredLogger, envConfigOverrideRepositoryImpl, chartRepositoryImpl, pipelineRepositoryImpl, envLevelAppMetricsRepositoryImpl, appLevelMetricsRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, configMapHistoryServiceImpl, chartRefRepositoryImpl, variableEntityMappingServiceImpl, scopedVariableServiceImpl, variableTemplateParserImpl) pipelineTriggerRestHandlerImpl := restHandler.NewPipelineRestHandler(appServiceImpl, userServiceImpl, validate, enforcerImpl, teamServiceImpl, sugaredLogger, enforcerUtilImpl, workflowDagExecutorImpl, deploymentGroupServiceImpl, argoUserServiceImpl, deploymentConfigServiceImpl) sseSSE := sse.NewSSE() pipelineTriggerRouterImpl := router.NewPipelineTriggerRouter(pipelineTriggerRestHandlerImpl, sseSSE) @@ -468,6 +464,8 @@ func 
InitializeApp() (*App, error) { ciBuildConfigServiceImpl := pipeline.NewCiBuildConfigServiceImpl(sugaredLogger, ciBuildConfigRepositoryImpl) ciTemplateServiceImpl := pipeline.NewCiTemplateServiceImpl(sugaredLogger, ciBuildConfigServiceImpl, ciTemplateRepositoryImpl, ciTemplateOverrideRepositoryImpl) configMapServiceImpl := pipeline.NewConfigMapServiceImpl(chartRepositoryImpl, sugaredLogger, chartRepoRepositoryImpl, utilMergeUtil, pipelineConfigRepositoryImpl, configMapRepositoryImpl, envConfigOverrideRepositoryImpl, commonServiceImpl, appRepositoryImpl, configMapHistoryServiceImpl, environmentRepositoryImpl) + imageTagRepositoryImpl := repository.NewImageTagRepository(db, sugaredLogger) + customTagServiceImpl := pipeline.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) ciCdPipelineOrchestratorImpl := pipeline.NewCiCdPipelineOrchestrator(appRepositoryImpl, sugaredLogger, materialRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciCdConfig, appWorkflowRepositoryImpl, environmentRepositoryImpl, attributesServiceImpl, appListingRepositoryImpl, appCrudOperationServiceImpl, userAuthServiceImpl, prePostCdScriptHistoryServiceImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, ciTemplateOverrideRepositoryImpl, gitMaterialHistoryServiceImpl, ciPipelineHistoryServiceImpl, ciTemplateServiceImpl, dockerArtifactStoreRepositoryImpl, configMapServiceImpl, customTagServiceImpl, genericNoteServiceImpl) ecrConfig, err := pipeline.GetEcrConfig() if err != nil { @@ -489,7 +487,7 @@ func InitializeApp() (*App, error) { return nil, err } devtronAppCMCSServiceImpl := pipeline.NewDevtronAppCMCSServiceImpl(sugaredLogger, appServiceImpl, attributesRepositoryImpl) - cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, 
appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, gitOpsConfigRepositoryImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, chartDeploymentServiceImpl, chartTemplateServiceImpl, propertiesConfigServiceImpl, appLevelMetricsRepositoryImpl, deploymentTemplateHistoryServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, pipelineDeploymentServiceTypeConfig, applicationServiceClientImpl, devtronAppCMCSServiceImpl, customTagServiceImpl) + cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, gitOpsConfigRepositoryImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, chartDeploymentServiceImpl, chartTemplateServiceImpl, propertiesConfigServiceImpl, appLevelMetricsRepositoryImpl, deploymentTemplateHistoryServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, pipelineDeploymentServiceTypeConfig, applicationServiceClientImpl, devtronAppCMCSServiceImpl) appArtifactManagerImpl := pipeline.NewAppArtifactManagerImpl(sugaredLogger, cdWorkflowRepositoryImpl, userServiceImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl, ciWorkflowRepositoryImpl, pipelineStageServiceImpl, cdPipelineConfigServiceImpl) globalStrategyMetadataChartRefMappingRepositoryImpl := chartRepoRepository.NewGlobalStrategyMetadataChartRefMappingRepositoryImpl(db, sugaredLogger) devtronAppStrategyServiceImpl := 
pipeline.NewDevtronAppStrategyServiceImpl(sugaredLogger, chartRepositoryImpl, globalStrategyMetadataChartRefMappingRepositoryImpl, ciCdPipelineOrchestratorImpl, cdPipelineConfigServiceImpl) @@ -504,13 +502,14 @@ func InitializeApp() (*App, error) { } ciHandlerImpl := pipeline.NewCiHandlerImpl(sugaredLogger, ciServiceImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciWorkflowRepositoryImpl, workflowServiceImpl, ciLogServiceImpl, ciArtifactRepositoryImpl, userServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciPipelineRepositoryImpl, appListingRepositoryImpl, k8sUtil, pipelineRepositoryImpl, enforcerUtilImpl, resourceGroupServiceImpl, environmentRepositoryImpl, imageTaggingServiceImpl, appWorkflowRepositoryImpl, customTagServiceImpl, k8sCommonServiceImpl) gitRegistryConfigImpl := pipeline.NewGitRegistryConfigImpl(sugaredLogger, gitProviderRepositoryImpl, clientImpl) + dockerRegistryConfigImpl := pipeline.NewDockerRegistryConfigImpl(sugaredLogger, helmAppServiceImpl, dockerArtifactStoreRepositoryImpl, dockerRegistryIpsConfigRepositoryImpl, ociRegistryConfigRepositoryImpl) appListingViewBuilderImpl := app2.NewAppListingViewBuilderImpl(sugaredLogger) linkoutsRepositoryImpl := repository.NewLinkoutsRepositoryImpl(sugaredLogger, db) appListingServiceImpl := app2.NewAppListingServiceImpl(sugaredLogger, appListingRepositoryImpl, applicationServiceClientImpl, appRepositoryImpl, appListingViewBuilderImpl, pipelineRepositoryImpl, linkoutsRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, environmentRepositoryImpl, argoUserServiceImpl, envConfigOverrideRepositoryImpl, chartRepositoryImpl, ciPipelineRepositoryImpl, dockerRegistryIpsConfigServiceImpl) deploymentEventHandlerImpl := app2.NewDeploymentEventHandlerImpl(sugaredLogger, appListingServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl) cdHandlerImpl := pipeline.NewCdHandlerImpl(sugaredLogger, userServiceImpl, 
cdWorkflowRepositoryImpl, ciLogServiceImpl, ciArtifactRepositoryImpl, ciPipelineMaterialRepositoryImpl, pipelineRepositoryImpl, environmentRepositoryImpl, ciWorkflowRepositoryImpl, helmAppServiceImpl, pipelineOverrideRepositoryImpl, workflowDagExecutorImpl, appListingServiceImpl, appListingRepositoryImpl, pipelineStatusTimelineRepositoryImpl, applicationServiceClientImpl, argoUserServiceImpl, deploymentEventHandlerImpl, eventRESTClientImpl, pipelineStatusTimelineResourcesServiceImpl, pipelineStatusSyncDetailServiceImpl, pipelineStatusTimelineServiceImpl, appServiceImpl, appStatusServiceImpl, enforcerUtilImpl, installedAppRepositoryImpl, installedAppVersionHistoryRepositoryImpl, appRepositoryImpl, resourceGroupServiceImpl, imageTaggingServiceImpl, k8sUtil, workflowServiceImpl) appWorkflowServiceImpl := appWorkflow2.NewAppWorkflowServiceImpl(sugaredLogger, appWorkflowRepositoryImpl, ciCdPipelineOrchestratorImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, enforcerUtilImpl, resourceGroupServiceImpl) - appCloneServiceImpl := appClone.NewAppCloneServiceImpl(sugaredLogger, pipelineBuilderImpl, materialRepositoryImpl, chartServiceImpl, configMapServiceImpl, appWorkflowServiceImpl, appListingServiceImpl, propertiesConfigServiceImpl, ciTemplateOverrideRepositoryImpl, pipelineStageServiceImpl, ciTemplateServiceImpl, appRepositoryImpl) + appCloneServiceImpl := appClone.NewAppCloneServiceImpl(sugaredLogger, pipelineBuilderImpl, materialRepositoryImpl, chartServiceImpl, configMapServiceImpl, appWorkflowServiceImpl, appListingServiceImpl, propertiesConfigServiceImpl, ciTemplateOverrideRepositoryImpl, pipelineStageServiceImpl, ciTemplateServiceImpl, appRepositoryImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, appWorkflowRepositoryImpl) deploymentTemplateRepositoryImpl := repository.NewDeploymentTemplateRepositoryImpl(db, sugaredLogger) deploymentTemplateServiceImpl := generateManifest.NewDeploymentTemplateServiceImpl(sugaredLogger, chartServiceImpl, 
appListingServiceImpl, appListingRepositoryImpl, deploymentTemplateRepositoryImpl, helmAppServiceImpl, chartRepositoryImpl, chartTemplateServiceImpl, helmAppClientImpl, k8sUtil, propertiesConfigServiceImpl, deploymentTemplateHistoryServiceImpl, environmentRepositoryImpl, appRepositoryImpl) imageScanObjectMetaRepositoryImpl := security.NewImageScanObjectMetaRepositoryImpl(db, sugaredLogger) @@ -745,6 +744,7 @@ func InitializeApp() (*App, error) { externalLinkServiceImpl := externalLink.NewExternalLinkServiceImpl(sugaredLogger, externalLinkMonitoringToolRepositoryImpl, externalLinkIdentifierMappingRepositoryImpl, externalLinkRepositoryImpl) externalLinkRestHandlerImpl := externalLink2.NewExternalLinkRestHandlerImpl(sugaredLogger, externalLinkServiceImpl, userServiceImpl, enforcerImpl, enforcerUtilImpl) externalLinkRouterImpl := externalLink2.NewExternalLinkRouterImpl(externalLinkRestHandlerImpl) + globalPluginServiceImpl := plugin.NewGlobalPluginService(sugaredLogger, globalPluginRepositoryImpl) globalPluginRestHandlerImpl := restHandler.NewGlobalPluginRestHandler(sugaredLogger, globalPluginServiceImpl, enforcerUtilImpl, enforcerImpl, pipelineBuilderImpl) globalPluginRouterImpl := router.NewGlobalPluginRouter(sugaredLogger, globalPluginRestHandlerImpl) moduleRestHandlerImpl := module2.NewModuleRestHandlerImpl(sugaredLogger, moduleServiceImpl, userServiceImpl, enforcerImpl, validate) From 6fa35bd462740a2857d87c0fddc6291fc5ca5d6b Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 26 Oct 2023 14:41:54 +0530 Subject: [PATCH 062/143] enable disable changes --- api/bean/CustomTag.go | 1 + .../sql/repository/CustomTagRepository.go | 1 + pkg/bean/app.go | 2 + pkg/pipeline/CiCdPipelineOrchestrator.go | 6 +++ pkg/pipeline/CustomTagService.go | 34 ++++++++++++----- .../DeploymentPipelineConfigService.go | 37 ++++++++++--------- pkg/pipeline/WorkflowDagExecutor.go | 5 +-- pkg/pipeline/bean/CustomTagService.go | 2 + pkg/pipeline/pipelineStageVariableParser.go | 5 +-- 
wire_gen.go | 13 ++++--- 10 files changed, 67 insertions(+), 39 deletions(-) diff --git a/api/bean/CustomTag.go b/api/bean/CustomTag.go index 8d398be5be..7c39ff4175 100644 --- a/api/bean/CustomTag.go +++ b/api/bean/CustomTag.go @@ -6,6 +6,7 @@ type CustomTag struct { TagPattern string `json:"tagPattern"` AutoIncreasingNumber int `json:"counterX"` Metadata string `json:"metadata"` + Enabled bool `json:"enabled"` } type CustomTagErrorResponse struct { diff --git a/internal/sql/repository/CustomTagRepository.go b/internal/sql/repository/CustomTagRepository.go index b075491db0..132046c885 100644 --- a/internal/sql/repository/CustomTagRepository.go +++ b/internal/sql/repository/CustomTagRepository.go @@ -14,6 +14,7 @@ type CustomTag struct { AutoIncreasingNumber int `sql:"auto_increasing_number, notnull"` Active bool `sql:"active"` Metadata string `sql:"metadata"` + Enabled bool `sql:"enabled"` } type ImagePathReservation struct { diff --git a/pkg/bean/app.go b/pkg/bean/app.go index 8743917f01..149aa4616b 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -121,6 +121,7 @@ type CiPipeline struct { LastTriggeredEnvId int `json:"lastTriggeredEnvId"` CustomTagObject *CustomTagData `json:"customTag,omitempty"` DefaultTag []string `json:"defaultTag,omitempty"` + EnableCustomTag bool `json:"enableCustomTag"` } type DockerConfigOverride struct { @@ -565,6 +566,7 @@ type CDPipelineConfigObject struct { ExternalCiPipelineId int `json:"externalCiPipelineId,omitempty"` CustomTagObject *CustomTagData `json:"customTag,omitempty"` CustomTagStage *repository.PipelineStageType `json:"customTagStage,omitempty"` + EnableCustomTag bool `json:"enableCustomTag"` } type PreStageConfigMapSecretNames struct { diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index f7500a83d3..ffef141f8a 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -330,6 +330,10 @@ func (impl 
CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. AuditLog: sql.AuditLog{UpdatedBy: userId, UpdatedOn: time.Now()}, } + if createRequest.EnableCustomTag && createRequest.CustomTagObject == nil { + return nil, errors.New("please input custom tag data if tag is enabled") + } + //If customTagObject has been passed, create or update the resource //Otherwise deleteIfExists if createRequest.CustomTagObject != nil { @@ -338,6 +342,7 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. EntityValue: strconv.Itoa(ciPipelineObject.Id), TagPattern: createRequest.CustomTagObject.TagPattern, AutoIncreasingNumber: createRequest.CustomTagObject.CounterX, + Enabled: createRequest.EnableCustomTag, } err = impl.customTagService.CreateOrUpdateCustomTag(&customTag) if err != nil { @@ -347,6 +352,7 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. customTag := bean4.CustomTag{ EntityKey: bean2.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipelineObject.Id), + Enabled: false, } err := impl.customTagService.DeleteCustomTagIfExists(customTag) if err != nil { diff --git a/pkg/pipeline/CustomTagService.go b/pkg/pipeline/CustomTagService.go index 76cfc766ac..91f8e00853 100644 --- a/pkg/pipeline/CustomTagService.go +++ b/pkg/pipeline/CustomTagService.go @@ -38,17 +38,31 @@ func (impl *CustomTagServiceImpl) DeactivateImagePathReservation(id int) error { } func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag *bean.CustomTag) error { - if err := validateTagPattern(tag.TagPattern); err != nil { - return err - } - customTagData := repository.CustomTag{ - EntityKey: tag.EntityKey, - EntityValue: tag.EntityValue, - TagPattern: strings.ReplaceAll(tag.TagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_X, bean2.IMAGE_TAG_VARIABLE_NAME_x), - AutoIncreasingNumber: tag.AutoIncreasingNumber, - Metadata: tag.Metadata, - Active: true, + if tag.Enabled { + if err := validateTagPattern(tag.TagPattern); err != nil { + 
return err + } + } + var customTagData repository.CustomTag + if tag.Enabled { + customTagData = repository.CustomTag{ + EntityKey: tag.EntityKey, + EntityValue: tag.EntityValue, + TagPattern: strings.ReplaceAll(tag.TagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_X, bean2.IMAGE_TAG_VARIABLE_NAME_x), + AutoIncreasingNumber: tag.AutoIncreasingNumber, + Metadata: tag.Metadata, + Active: true, + Enabled: true, + } + } else { + customTagData = repository.CustomTag{ + EntityKey: tag.EntityKey, + EntityValue: tag.EntityValue, + Active: true, + Enabled: false, + } } + oldTagObject, err := impl.customTagRepository.FetchCustomTagData(customTagData.EntityKey, customTagData.EntityValue) if err != nil && err != pg.ErrNoRows { return err diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index d8953ff3c9..9c03328f07 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -33,7 +33,6 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/bean" chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" @@ -142,7 +141,7 @@ type CdPipelineConfigServiceImpl struct { variableTemplateParser parsers.VariableTemplateParser deploymentConfig *DeploymentServiceTypeConfig application application.ServiceClient - customTagService pkg.CustomTagService + customTagService CustomTagService devtronAppCMCSService DevtronAppCMCSService } @@ -177,7 +176,7 @@ func NewCdPipelineConfigServiceImpl( deploymentConfig *DeploymentServiceTypeConfig, application application.ServiceClient, 
devtronAppCMCSService DevtronAppCMCSService, - customTagService pkg.CustomTagService) *CdPipelineConfigServiceImpl { + customTagService CustomTagService) *CdPipelineConfigServiceImpl { return &CdPipelineConfigServiceImpl{ logger: logger, pipelineRepository: pipelineRepository, @@ -281,12 +280,12 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelineById(pipelineId int) (cdPi var customTag *bean.CustomTagData var customTagStage repository5.PipelineStageType - customTagPreCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypePreCD, strconv.Itoa(pipelineId)) + customTagPreCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypePreCD, strconv.Itoa(pipelineId)) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching custom Tag precd") return nil, err } - customTagPostCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypePostCD, strconv.Itoa(pipelineId)) + customTagPostCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypePostCD, strconv.Itoa(pipelineId)) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching custom Tag precd") return nil, err @@ -425,6 +424,9 @@ func (impl *CdPipelineConfigServiceImpl) CreateCdPipelines(pipelineCreateRequest } func (impl *CdPipelineConfigServiceImpl) CDPipelineCustomTagDBOperations(pipeline *bean.CDPipelineConfigObject) error { + if pipeline.EnableCustomTag && pipeline.CustomTagObject == nil { + return fmt.Errorf("please provide custom tag data if tag is enabled") + } if pipeline.CustomTagObject == nil && pipeline.CustomTagStage == nil { // delete custom tag if removed from request err := impl.DeleteCustomTag(pipeline) @@ -483,7 +485,7 @@ func (impl *CdPipelineConfigServiceImpl) DeleteCustomTagByPipelineStageType(pipe } func (impl *CdPipelineConfigServiceImpl) SaveOrUpdateCustomTagForCDPipeline(pipeline *bean.CDPipelineConfigObject) error { - customTag, err := 
impl.ParseCustomTagPatchRequest(pipeline.Id, pipeline.CustomTagObject, pipeline.CustomTagStage) + customTag, err := impl.ParseCustomTagPatchRequest(pipeline) if err != nil { impl.logger.Errorw("err", err) return err @@ -496,17 +498,18 @@ func (impl *CdPipelineConfigServiceImpl) SaveOrUpdateCustomTagForCDPipeline(pipe return nil } -func (impl *CdPipelineConfigServiceImpl) ParseCustomTagPatchRequest(pipelineId int, customTagData *bean.CustomTagData, pipelineStageType *repository5.PipelineStageType) (*bean2.CustomTag, error) { - entityType := getEntityTypeByPipelineStageType(*pipelineStageType) +func (impl *CdPipelineConfigServiceImpl) ParseCustomTagPatchRequest(pipelineRequest *bean.CDPipelineConfigObject) (*bean2.CustomTag, error) { + entityType := getEntityTypeByPipelineStageType(*pipelineRequest.CustomTagStage) if entityType == 0 { - return nil, fmt.Errorf("invalid stage for cd pipeline custom tag; pipelineStageType: %s ", string(*pipelineStageType)) + return nil, fmt.Errorf("invalid stage for cd pipeline custom tag; pipelineStageType: %s ", string(*pipelineRequest.CustomTagStage)) } customTag := &bean2.CustomTag{ EntityKey: entityType, - EntityValue: fmt.Sprintf("%d", pipelineId), - TagPattern: customTagData.TagPattern, - AutoIncreasingNumber: customTagData.CounterX, + EntityValue: fmt.Sprintf("%d", pipelineRequest.Id), + TagPattern: pipelineRequest.CustomTagObject.TagPattern, + AutoIncreasingNumber: pipelineRequest.CustomTagObject.CounterX, Metadata: "", + Enabled: pipelineRequest.EnableCustomTag, } return customTag, nil } @@ -514,11 +517,11 @@ func (impl *CdPipelineConfigServiceImpl) ParseCustomTagPatchRequest(pipelineId i func getEntityTypeByPipelineStageType(pipelineStageType repository5.PipelineStageType) (customTagEntityType int) { switch pipelineStageType { case repository5.PIPELINE_STAGE_TYPE_PRE_CD: - customTagEntityType = pkg.EntityTypePreCD + customTagEntityType = bean3.EntityTypePreCD case repository5.PIPELINE_STAGE_TYPE_POST_CD: - customTagEntityType 
= pkg.EntityTypePostCD + customTagEntityType = bean3.EntityTypePostCD default: - customTagEntityType = pkg.EntityNull + customTagEntityType = bean3.EntityNull } return customTagEntityType } @@ -1027,12 +1030,12 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelinesByEnvironment(request res for _, dbPipeline := range authorizedPipelines { var customTag *bean.CustomTagData var customTagStage repository5.PipelineStageType - customTagPreCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypePreCD, strconv.Itoa(dbPipeline.Id)) + customTagPreCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypePreCD, strconv.Itoa(dbPipeline.Id)) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching custom Tag precd") return nil, err } - customTagPostCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pkg.EntityTypePostCD, strconv.Itoa(dbPipeline.Id)) + customTagPostCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypePostCD, strconv.Itoa(dbPipeline.Id)) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching custom Tag precd") return nil, err diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index b7ed0a951e..d5b0c43a1c 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -32,7 +32,6 @@ import ( "github.com/devtron-labs/devtron/client/argocdServer" application2 "github.com/devtron-labs/devtron/client/argocdServer/application" gitSensorClient "github.com/devtron-labs/devtron/client/gitSensor" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/internal/middleware" app2 "github.com/devtron-labs/devtron/internal/sql/repository/app" bean4 "github.com/devtron-labs/devtron/pkg/app/bean" @@ -758,7 +757,7 @@ func (impl 
*WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * for _, step := range cdStageWorkflowRequest.PreCiSteps { if step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, pkg.EntityTypePreCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, bean3.EntityTypePreCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err @@ -891,7 +890,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor for _, step := range cdStageWorkflowRequest.PostCiSteps { if step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, pkg.EntityTypePostCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, bean3.EntityTypePostCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err diff --git a/pkg/pipeline/bean/CustomTagService.go b/pkg/pipeline/bean/CustomTagService.go index b823de3aed..13ac0da763 100644 --- 
a/pkg/pipeline/bean/CustomTagService.go +++ b/pkg/pipeline/bean/CustomTagService.go @@ -5,6 +5,8 @@ import "fmt" const ( EntityNull = iota EntityTypeCiPipelineId + EntityTypePreCD + EntityTypePostCD ) const ( diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index 7aa0d07401..0bff56e6da 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -3,7 +3,6 @@ package pipeline import ( "errors" "fmt" - "github.com/devtron-labs/devtron/pkg" "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/devtron-labs/devtron/pkg/plugin" "github.com/go-pg/pg" @@ -30,13 +29,13 @@ type PluginInputVariableParser interface { type PluginInputVariableParserImpl struct { logger *zap.SugaredLogger dockerRegistryConfig DockerRegistryConfig - customTagService pkg.CustomTagService + customTagService CustomTagService } func NewPluginInputVariableParserImpl( logger *zap.SugaredLogger, dockerRegistryConfig DockerRegistryConfig, - customTagService pkg.CustomTagService, + customTagService CustomTagService, ) *PluginInputVariableParserImpl { return &PluginInputVariableParserImpl{ logger: logger, diff --git a/wire_gen.go b/wire_gen.go index a095cb7752..2c9845e17e 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -446,7 +446,12 @@ func InitializeApp() (*App, error) { pipelineStageRepositoryImpl := repository11.NewPipelineStageRepository(sugaredLogger, db) globalPluginRepositoryImpl := repository12.NewGlobalPluginRepository(sugaredLogger, db) pipelineStageServiceImpl := pipeline.NewPipelineStageService(sugaredLogger, pipelineStageRepositoryImpl, globalPluginRepositoryImpl, pipelineRepositoryImpl, scopedVariableServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl) - workflowDagExecutorImpl := pipeline.NewWorkflowDagExecutorImpl(sugaredLogger, pipelineRepositoryImpl, cdWorkflowRepositoryImpl, 
pubSubClientServiceImpl, appServiceImpl, workflowServiceImpl, ciArtifactRepositoryImpl, ciPipelineRepositoryImpl, materialRepositoryImpl, pipelineOverrideRepositoryImpl, userServiceImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, enforcerImpl, enforcerUtilImpl, tokenCache, acdAuthConfig, eventSimpleFactoryImpl, eventRESTClientImpl, cvePolicyRepositoryImpl, imageScanResultRepositoryImpl, appWorkflowRepositoryImpl, prePostCdScriptHistoryServiceImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, pipelineStatusTimelineServiceImpl, ciTemplateRepositoryImpl, ciWorkflowRepositoryImpl, appLabelRepositoryImpl, clientImpl, pipelineStageServiceImpl, k8sCommonServiceImpl, variableSnapshotHistoryServiceImpl, deploymentTemplateHistoryServiceImpl, configMapHistoryServiceImpl, pipelineStrategyHistoryServiceImpl, manifestPushConfigRepositoryImpl, gitOpsManifestPushServiceImpl, ciPipelineMaterialRepositoryImpl, imageScanHistoryRepositoryImpl, imageScanDeployInfoRepositoryImpl, appCrudOperationServiceImpl, pipelineConfigRepositoryImpl, dockerRegistryIpsConfigServiceImpl, chartRepositoryImpl, chartTemplateServiceImpl, pipelineStrategyHistoryRepositoryImpl, appRepositoryImpl, deploymentTemplateHistoryRepositoryImpl, argoK8sClientImpl, configMapRepositoryImpl, configMapHistoryRepositoryImpl, refChartDir, helmAppServiceImpl, helmAppClientImpl, chartRefRepositoryImpl, envConfigOverrideRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, dbMigrationConfigRepositoryImpl, mergeUtil, gitOpsConfigRepositoryImpl, gitFactory, applicationServiceClientImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, argoClientWrapperServiceImpl, scopedVariableServiceImpl) + globalPluginServiceImpl := plugin.NewGlobalPluginService(sugaredLogger, globalPluginRepositoryImpl) + dockerRegistryConfigImpl := pipeline.NewDockerRegistryConfigImpl(sugaredLogger, helmAppServiceImpl, dockerArtifactStoreRepositoryImpl, 
dockerRegistryIpsConfigRepositoryImpl, ociRegistryConfigRepositoryImpl) + imageTagRepositoryImpl := repository.NewImageTagRepository(db, sugaredLogger) + customTagServiceImpl := pipeline.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) + pluginInputVariableParserImpl := pipeline.NewPluginInputVariableParserImpl(sugaredLogger, dockerRegistryConfigImpl, customTagServiceImpl) + workflowDagExecutorImpl := pipeline.NewWorkflowDagExecutorImpl(sugaredLogger, pipelineRepositoryImpl, cdWorkflowRepositoryImpl, pubSubClientServiceImpl, appServiceImpl, workflowServiceImpl, ciArtifactRepositoryImpl, ciPipelineRepositoryImpl, materialRepositoryImpl, pipelineOverrideRepositoryImpl, userServiceImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, enforcerImpl, enforcerUtilImpl, tokenCache, acdAuthConfig, eventSimpleFactoryImpl, eventRESTClientImpl, cvePolicyRepositoryImpl, imageScanResultRepositoryImpl, appWorkflowRepositoryImpl, prePostCdScriptHistoryServiceImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, pipelineStatusTimelineServiceImpl, ciTemplateRepositoryImpl, ciWorkflowRepositoryImpl, appLabelRepositoryImpl, clientImpl, pipelineStageServiceImpl, k8sCommonServiceImpl, variableSnapshotHistoryServiceImpl, globalPluginServiceImpl, pluginInputVariableParserImpl, deploymentTemplateHistoryServiceImpl, configMapHistoryServiceImpl, pipelineStrategyHistoryServiceImpl, manifestPushConfigRepositoryImpl, gitOpsManifestPushServiceImpl, ciPipelineMaterialRepositoryImpl, imageScanHistoryRepositoryImpl, imageScanDeployInfoRepositoryImpl, appCrudOperationServiceImpl, pipelineConfigRepositoryImpl, dockerRegistryIpsConfigServiceImpl, chartRepositoryImpl, chartTemplateServiceImpl, pipelineStrategyHistoryRepositoryImpl, appRepositoryImpl, deploymentTemplateHistoryRepositoryImpl, argoK8sClientImpl, configMapRepositoryImpl, configMapHistoryRepositoryImpl, refChartDir, helmAppServiceImpl, helmAppClientImpl, chartRefRepositoryImpl, envConfigOverrideRepositoryImpl, 
appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, dbMigrationConfigRepositoryImpl, mergeUtil, gitOpsConfigRepositoryImpl, gitFactory, applicationServiceClientImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, argoClientWrapperServiceImpl, scopedVariableServiceImpl) deploymentGroupAppRepositoryImpl := repository.NewDeploymentGroupAppRepositoryImpl(sugaredLogger, db) deploymentGroupServiceImpl := deploymentGroup.NewDeploymentGroupServiceImpl(appRepositoryImpl, sugaredLogger, pipelineRepositoryImpl, ciPipelineRepositoryImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, deploymentGroupAppRepositoryImpl, ciArtifactRepositoryImpl, appWorkflowRepositoryImpl, workflowDagExecutorImpl) deploymentConfigServiceImpl := pipeline.NewDeploymentConfigServiceImpl(sugaredLogger, envConfigOverrideRepositoryImpl, chartRepositoryImpl, pipelineRepositoryImpl, envLevelAppMetricsRepositoryImpl, appLevelMetricsRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, configMapHistoryServiceImpl, chartRefRepositoryImpl, variableEntityMappingServiceImpl, scopedVariableServiceImpl, variableTemplateParserImpl) @@ -464,8 +469,6 @@ func InitializeApp() (*App, error) { ciBuildConfigServiceImpl := pipeline.NewCiBuildConfigServiceImpl(sugaredLogger, ciBuildConfigRepositoryImpl) ciTemplateServiceImpl := pipeline.NewCiTemplateServiceImpl(sugaredLogger, ciBuildConfigServiceImpl, ciTemplateRepositoryImpl, ciTemplateOverrideRepositoryImpl) configMapServiceImpl := pipeline.NewConfigMapServiceImpl(chartRepositoryImpl, sugaredLogger, chartRepoRepositoryImpl, utilMergeUtil, pipelineConfigRepositoryImpl, configMapRepositoryImpl, envConfigOverrideRepositoryImpl, commonServiceImpl, appRepositoryImpl, configMapHistoryServiceImpl, environmentRepositoryImpl) - imageTagRepositoryImpl := repository.NewImageTagRepository(db, sugaredLogger) - customTagServiceImpl := pipeline.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) ciCdPipelineOrchestratorImpl 
:= pipeline.NewCiCdPipelineOrchestrator(appRepositoryImpl, sugaredLogger, materialRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciCdConfig, appWorkflowRepositoryImpl, environmentRepositoryImpl, attributesServiceImpl, appListingRepositoryImpl, appCrudOperationServiceImpl, userAuthServiceImpl, prePostCdScriptHistoryServiceImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, ciTemplateOverrideRepositoryImpl, gitMaterialHistoryServiceImpl, ciPipelineHistoryServiceImpl, ciTemplateServiceImpl, dockerArtifactStoreRepositoryImpl, configMapServiceImpl, customTagServiceImpl, genericNoteServiceImpl) ecrConfig, err := pipeline.GetEcrConfig() if err != nil { @@ -487,7 +490,7 @@ func InitializeApp() (*App, error) { return nil, err } devtronAppCMCSServiceImpl := pipeline.NewDevtronAppCMCSServiceImpl(sugaredLogger, appServiceImpl, attributesRepositoryImpl) - cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, gitOpsConfigRepositoryImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, chartDeploymentServiceImpl, chartTemplateServiceImpl, propertiesConfigServiceImpl, appLevelMetricsRepositoryImpl, deploymentTemplateHistoryServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, pipelineDeploymentServiceTypeConfig, applicationServiceClientImpl, devtronAppCMCSServiceImpl) + cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, 
appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, gitOpsConfigRepositoryImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, chartDeploymentServiceImpl, chartTemplateServiceImpl, propertiesConfigServiceImpl, appLevelMetricsRepositoryImpl, deploymentTemplateHistoryServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, pipelineDeploymentServiceTypeConfig, applicationServiceClientImpl, devtronAppCMCSServiceImpl, customTagServiceImpl) appArtifactManagerImpl := pipeline.NewAppArtifactManagerImpl(sugaredLogger, cdWorkflowRepositoryImpl, userServiceImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl, ciWorkflowRepositoryImpl, pipelineStageServiceImpl, cdPipelineConfigServiceImpl) globalStrategyMetadataChartRefMappingRepositoryImpl := chartRepoRepository.NewGlobalStrategyMetadataChartRefMappingRepositoryImpl(db, sugaredLogger) devtronAppStrategyServiceImpl := pipeline.NewDevtronAppStrategyServiceImpl(sugaredLogger, chartRepositoryImpl, globalStrategyMetadataChartRefMappingRepositoryImpl, ciCdPipelineOrchestratorImpl, cdPipelineConfigServiceImpl) @@ -502,7 +505,6 @@ func InitializeApp() (*App, error) { } ciHandlerImpl := pipeline.NewCiHandlerImpl(sugaredLogger, ciServiceImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciWorkflowRepositoryImpl, workflowServiceImpl, ciLogServiceImpl, ciArtifactRepositoryImpl, userServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciPipelineRepositoryImpl, appListingRepositoryImpl, k8sUtil, pipelineRepositoryImpl, enforcerUtilImpl, resourceGroupServiceImpl, environmentRepositoryImpl, imageTaggingServiceImpl, appWorkflowRepositoryImpl, customTagServiceImpl, k8sCommonServiceImpl) gitRegistryConfigImpl := 
pipeline.NewGitRegistryConfigImpl(sugaredLogger, gitProviderRepositoryImpl, clientImpl) - dockerRegistryConfigImpl := pipeline.NewDockerRegistryConfigImpl(sugaredLogger, helmAppServiceImpl, dockerArtifactStoreRepositoryImpl, dockerRegistryIpsConfigRepositoryImpl, ociRegistryConfigRepositoryImpl) appListingViewBuilderImpl := app2.NewAppListingViewBuilderImpl(sugaredLogger) linkoutsRepositoryImpl := repository.NewLinkoutsRepositoryImpl(sugaredLogger, db) appListingServiceImpl := app2.NewAppListingServiceImpl(sugaredLogger, appListingRepositoryImpl, applicationServiceClientImpl, appRepositoryImpl, appListingViewBuilderImpl, pipelineRepositoryImpl, linkoutsRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, environmentRepositoryImpl, argoUserServiceImpl, envConfigOverrideRepositoryImpl, chartRepositoryImpl, ciPipelineRepositoryImpl, dockerRegistryIpsConfigServiceImpl) @@ -744,7 +746,6 @@ func InitializeApp() (*App, error) { externalLinkServiceImpl := externalLink.NewExternalLinkServiceImpl(sugaredLogger, externalLinkMonitoringToolRepositoryImpl, externalLinkIdentifierMappingRepositoryImpl, externalLinkRepositoryImpl) externalLinkRestHandlerImpl := externalLink2.NewExternalLinkRestHandlerImpl(sugaredLogger, externalLinkServiceImpl, userServiceImpl, enforcerImpl, enforcerUtilImpl) externalLinkRouterImpl := externalLink2.NewExternalLinkRouterImpl(externalLinkRestHandlerImpl) - globalPluginServiceImpl := plugin.NewGlobalPluginService(sugaredLogger, globalPluginRepositoryImpl) globalPluginRestHandlerImpl := restHandler.NewGlobalPluginRestHandler(sugaredLogger, globalPluginServiceImpl, enforcerUtilImpl, enforcerImpl, pipelineBuilderImpl) globalPluginRouterImpl := router.NewGlobalPluginRouter(sugaredLogger, globalPluginRestHandlerImpl) moduleRestHandlerImpl := module2.NewModuleRestHandlerImpl(sugaredLogger, moduleServiceImpl, userServiceImpl, enforcerImpl, validate) From 
a489727c1cec7fe480091309391e047cafd5ba56 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 26 Oct 2023 14:48:17 +0530 Subject: [PATCH 063/143] tag enabled sql script --- scripts/sql/183_custom_tag.up.sql | 1 + 1 file changed, 1 insertion(+) create mode 100644 scripts/sql/183_custom_tag.up.sql diff --git a/scripts/sql/183_custom_tag.up.sql b/scripts/sql/183_custom_tag.up.sql new file mode 100644 index 0000000000..f9b5a646e9 --- /dev/null +++ b/scripts/sql/183_custom_tag.up.sql @@ -0,0 +1 @@ +ALTER TABLE custom_tag ADD COLUMN enabled boolean default false; From d98117f6f14f64c7014664ca515a0bb3f5f38c96 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 26 Oct 2023 15:27:50 +0530 Subject: [PATCH 064/143] enable flag in get api --- pkg/bean/app.go | 1 + pkg/pipeline/BuildPipelineConfigService.go | 1 + pkg/pipeline/DeploymentPipelineConfigService.go | 2 ++ 3 files changed, 4 insertions(+) diff --git a/pkg/bean/app.go b/pkg/bean/app.go index 149aa4616b..4b4b9c4240 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -244,6 +244,7 @@ type CiMaterialPatchRequest struct { type CustomTagData struct { TagPattern string `json:"tagPattern"` CounterX int `json:"counterX"` + Enabled bool `json:"enabled"` } type CiMaterialValuePatchRequest struct { diff --git a/pkg/pipeline/BuildPipelineConfigService.go b/pkg/pipeline/BuildPipelineConfigService.go index 4219f677ef..b4378898b3 100644 --- a/pkg/pipeline/BuildPipelineConfigService.go +++ b/pkg/pipeline/BuildPipelineConfigService.go @@ -628,6 +628,7 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipeline(appId int) (ciConfig *bea ciPipeline.CustomTagObject = &bean.CustomTagData{ TagPattern: customTag.TagPattern, CounterX: customTag.AutoIncreasingNumber, + Enabled: customTag.Enabled, } } if ciEnvMapping.Id > 0 { diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index 9c03328f07..224cd21ae8 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ 
b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -293,11 +293,13 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelineById(pipelineId int) (cdPi if customTagPreCD != nil && customTagPreCD.Id > 0 { customTag = &bean.CustomTagData{TagPattern: customTagPreCD.TagPattern, CounterX: customTagPreCD.AutoIncreasingNumber, + Enabled: customTagPreCD.Enabled, } customTagStage = repository5.PIPELINE_STAGE_TYPE_PRE_CD } else if customTagPostCD != nil && customTagPostCD.Id > 0 { customTag = &bean.CustomTagData{TagPattern: customTagPostCD.TagPattern, CounterX: customTagPostCD.AutoIncreasingNumber, + Enabled: customTagPostCD.Enabled, } customTagStage = repository5.PIPELINE_STAGE_TYPE_POST_CD } From 478709110979bb08845de6c158dc3a0c56f0742c Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Thu, 26 Oct 2023 16:42:19 +0530 Subject: [PATCH 065/143] searchstring refactor fix --- internal/sql/repository/CiArtifactRepository.go | 2 +- internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go | 2 +- pkg/pipeline/AppArtifactManager.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index e370688fd5..6cde743368 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -243,7 +243,7 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipeline(cdPipelineId, limi func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, error) { //TODO Gireesh: listingFilterOpts.SearchString should be conditional, artifacts := make([]*CiArtifact, 0, listingFilterOpts.Limit) - commonPaginationQueryPart := " cia.image ILIKE %?%" + + commonPaginationQueryPart := " cia.image LIKE ?" + " ORDER BY cia.id DESC" + " LIMIT ?" 
+ " OFFSET ?;" diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 44a982b844..1d948f630b 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -392,7 +392,7 @@ func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOpti listingFilterOptions.ParentId, listingFilterOptions.ParentStageType, pg.In([]string{application.Healthy, application.SUCCEEDED})). - Where("cia.image ILIKE %?%", listingFilterOptions.SearchString) + Where("cia.image LIKE ?", listingFilterOptions.SearchString) if len(listingFilterOptions.ExcludeArtifactIds) > 0 { query = query.Where("cd_workflow.ci_artifact_id NOT IN (?)", pg.In(listingFilterOptions.ExcludeArtifactIds)) } diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index b38017644b..11b9e4b055 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -441,7 +441,7 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi artifactListingFilterOpts.ParentCdId = parentCdId artifactListingFilterOpts.ParentStageType = parentType artifactListingFilterOpts.StageType = stage - + artifactListingFilterOpts.SearchString = "%" + artifactListingFilterOpts.SearchString + "%" ciArtifactsRefs, latestWfArtifactId, latestWfArtifactStatus, err := impl.BuildArtifactsList(artifactListingFilterOpts) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting artifacts for child cd stage", "err", err, "stage", stage) From 1402943db21aa24413c2d497af6d3d7249c36e43 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 26 Oct 2023 17:37:17 +0530 Subject: [PATCH 066/143] wip: get api fix --- pkg/pipeline/BuildPipelineConfigService.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/pipeline/BuildPipelineConfigService.go 
b/pkg/pipeline/BuildPipelineConfigService.go index b4378898b3..d8373c061b 100644 --- a/pkg/pipeline/BuildPipelineConfigService.go +++ b/pkg/pipeline/BuildPipelineConfigService.go @@ -766,6 +766,7 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipelineById(pipelineId int) (ciPi ciPipeline.CustomTagObject = &bean.CustomTagData{ TagPattern: customTag.TagPattern, CounterX: customTag.AutoIncreasingNumber, + Enabled: customTag.Enabled, } } ciEnvMapping, err := impl.ciPipelineRepository.FindCiEnvMappingByCiPipelineId(pipelineId) From b0021db380f0135e42741c2b808d102c4a8e01ac Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 26 Oct 2023 19:00:17 +0530 Subject: [PATCH 067/143] custom tag enabled moved outside --- pkg/bean/app.go | 2 +- pkg/pipeline/BuildPipelineConfigService.go | 2 +- pkg/pipeline/DeploymentPipelineConfigService.go | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/bean/app.go b/pkg/bean/app.go index a5397679b2..166c1b7b8b 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -244,7 +244,7 @@ type CiMaterialPatchRequest struct { type CustomTagData struct { TagPattern string `json:"tagPattern"` CounterX int `json:"counterX"` - Enabled bool `json:"enabled"` + Enabled bool `json:"-"` } type CiMaterialValuePatchRequest struct { diff --git a/pkg/pipeline/BuildPipelineConfigService.go b/pkg/pipeline/BuildPipelineConfigService.go index d8373c061b..e46af40880 100644 --- a/pkg/pipeline/BuildPipelineConfigService.go +++ b/pkg/pipeline/BuildPipelineConfigService.go @@ -766,8 +766,8 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipelineById(pipelineId int) (ciPi ciPipeline.CustomTagObject = &bean.CustomTagData{ TagPattern: customTag.TagPattern, CounterX: customTag.AutoIncreasingNumber, - Enabled: customTag.Enabled, } + ciPipeline.EnableCustomTag = customTag.Enabled } ciEnvMapping, err := impl.ciPipelineRepository.FindCiEnvMappingByCiPipelineId(pipelineId) if err != nil && err != pg.ErrNoRows { diff --git 
a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index 224cd21ae8..25acb52cc3 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -327,6 +327,7 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelineById(pipelineId int) (cdPi IsVirtualEnvironment: dbPipeline.Environment.IsVirtualEnvironment, CustomTagObject: customTag, CustomTagStage: &customTagStage, + EnableCustomTag: customTag.Enabled, } var preDeployStage *bean3.PipelineStageDto var postDeployStage *bean3.PipelineStageDto From cb0db3b754b3cd9164288eb14d43739385066dfe Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 26 Oct 2023 19:28:33 +0530 Subject: [PATCH 068/143] wip --- pkg/pipeline/BuildPipelineConfigService.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/pipeline/BuildPipelineConfigService.go b/pkg/pipeline/BuildPipelineConfigService.go index e46af40880..2612eebe15 100644 --- a/pkg/pipeline/BuildPipelineConfigService.go +++ b/pkg/pipeline/BuildPipelineConfigService.go @@ -630,6 +630,7 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipeline(appId int) (ciConfig *bea CounterX: customTag.AutoIncreasingNumber, Enabled: customTag.Enabled, } + ciPipeline.EnableCustomTag = customTag.Enabled } if ciEnvMapping.Id > 0 { ciPipeline.EnvironmentId = ciEnvMapping.EnvironmentId From 70e70d8243250b83e29b156b87b0f63d711c52da Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 26 Oct 2023 19:41:48 +0530 Subject: [PATCH 069/143] wip --- pkg/pipeline/CiCdPipelineOrchestrator.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index ffef141f8a..1d96de20d1 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -780,6 +780,7 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCiConf(createRequest *bean.CiConf EntityValue: strconv.Itoa(ciPipeline.Id), 
TagPattern: ciPipeline.CustomTagObject.TagPattern, AutoIncreasingNumber: ciPipeline.CustomTagObject.CounterX, + Enabled: ciPipeline.EnableCustomTag, } err := impl.customTagService.CreateOrUpdateCustomTag(customTag) if err != nil { From b1dad3270d89bf1a698c22d7d7560aa185d27274 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 27 Oct 2023 00:26:05 +0530 Subject: [PATCH 070/143] update api --- pkg/pipeline/CustomTagService.go | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/pkg/pipeline/CustomTagService.go b/pkg/pipeline/CustomTagService.go index 91f8e00853..8403311649 100644 --- a/pkg/pipeline/CustomTagService.go +++ b/pkg/pipeline/CustomTagService.go @@ -44,23 +44,14 @@ func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag *bean.CustomTag) e } } var customTagData repository.CustomTag - if tag.Enabled { - customTagData = repository.CustomTag{ - EntityKey: tag.EntityKey, - EntityValue: tag.EntityValue, - TagPattern: strings.ReplaceAll(tag.TagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_X, bean2.IMAGE_TAG_VARIABLE_NAME_x), - AutoIncreasingNumber: tag.AutoIncreasingNumber, - Metadata: tag.Metadata, - Active: true, - Enabled: true, - } - } else { - customTagData = repository.CustomTag{ - EntityKey: tag.EntityKey, - EntityValue: tag.EntityValue, - Active: true, - Enabled: false, - } + customTagData = repository.CustomTag{ + EntityKey: tag.EntityKey, + EntityValue: tag.EntityValue, + TagPattern: strings.ReplaceAll(tag.TagPattern, bean2.IMAGE_TAG_VARIABLE_NAME_X, bean2.IMAGE_TAG_VARIABLE_NAME_x), + AutoIncreasingNumber: tag.AutoIncreasingNumber, + Metadata: tag.Metadata, + Active: true, + Enabled: true, } oldTagObject, err := impl.customTagRepository.FetchCustomTagData(customTagData.EntityKey, customTagData.EntityValue) @@ -72,6 +63,11 @@ func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag *bean.CustomTag) e } else { customTagData.Id = oldTagObject.Id customTagData.Active = true + if !tag.Enabled { + 
customTagData.TagPattern = oldTagObject.TagPattern + customTagData.AutoIncreasingNumber = oldTagObject.AutoIncreasingNumber + customTagData.Enabled = false + } return impl.customTagRepository.UpdateImageTag(&customTagData) } } From 2349e4371876212f8ccff12e4a1080e4dadf2609 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 27 Oct 2023 11:50:38 +0530 Subject: [PATCH 071/143] wip --- pkg/pipeline/DeploymentPipelineConfigService.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index 25acb52cc3..0b2610ce81 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -280,6 +280,7 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelineById(pipelineId int) (cdPi var customTag *bean.CustomTagData var customTagStage repository5.PipelineStageType + var customTagEnabled bool customTagPreCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypePreCD, strconv.Itoa(pipelineId)) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching custom Tag precd") @@ -296,12 +297,14 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelineById(pipelineId int) (cdPi Enabled: customTagPreCD.Enabled, } customTagStage = repository5.PIPELINE_STAGE_TYPE_PRE_CD + customTagEnabled = customTagPreCD.Enabled } else if customTagPostCD != nil && customTagPostCD.Id > 0 { customTag = &bean.CustomTagData{TagPattern: customTagPostCD.TagPattern, CounterX: customTagPostCD.AutoIncreasingNumber, Enabled: customTagPostCD.Enabled, } customTagStage = repository5.PIPELINE_STAGE_TYPE_POST_CD + customTagEnabled = customTagPostCD.Enabled } cdPipeline = &bean.CDPipelineConfigObject{ @@ -327,7 +330,7 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelineById(pipelineId int) (cdPi IsVirtualEnvironment: dbPipeline.Environment.IsVirtualEnvironment, CustomTagObject: customTag, 
CustomTagStage: &customTagStage, - EnableCustomTag: customTag.Enabled, + EnableCustomTag: customTagEnabled, } var preDeployStage *bean3.PipelineStageDto var postDeployStage *bean3.PipelineStageDto From 5f35f1c412014d93bfb0c3ba649beacd0024c9d2 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 27 Oct 2023 13:07:54 +0530 Subject: [PATCH 072/143] wip --- pkg/bean/app.go | 4 +-- .../DeploymentPipelineConfigService.go | 31 +++++++++++++++++++ 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/pkg/bean/app.go b/pkg/bean/app.go index 166c1b7b8b..a0278c294d 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -565,8 +565,8 @@ type CDPipelineConfigObject struct { SourceToNewPipelineId map[int]int `json:"sourceToNewPipelineId,omitempty"` RefPipelineId int `json:"refPipelineId,omitempty"` ExternalCiPipelineId int `json:"externalCiPipelineId,omitempty"` - CustomTagObject *CustomTagData `json:"customTag,omitempty"` - CustomTagStage *repository.PipelineStageType `json:"customTagStage,omitempty"` + CustomTagObject *CustomTagData `json:"customTag"` + CustomTagStage *repository.PipelineStageType `json:"customTagStage"` EnableCustomTag bool `json:"enableCustomTag"` } diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index 0b2610ce81..78e59330fc 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -922,6 +922,34 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelinesForApp(appId int) (cdPipe deploymentTemplate = item.Strategy } } + var customTag *bean.CustomTagData + var customTagStage repository5.PipelineStageType + var customTagEnabled bool + customTagPreCD, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypePreCD, strconv.Itoa(dbPipeline.Id)) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching custom Tag precd") + return nil, err + } + customTagPostCD, err := 
impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(bean3.EntityTypePostCD, strconv.Itoa(dbPipeline.Id)) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching custom Tag precd") + return nil, err + } + if customTagPreCD != nil && customTagPreCD.Id > 0 { + customTag = &bean.CustomTagData{TagPattern: customTagPreCD.TagPattern, + CounterX: customTagPreCD.AutoIncreasingNumber, + Enabled: customTagPreCD.Enabled, + } + customTagStage = repository5.PIPELINE_STAGE_TYPE_PRE_CD + customTagEnabled = customTagPreCD.Enabled + } else if customTagPostCD != nil && customTagPostCD.Id > 0 { + customTag = &bean.CustomTagData{TagPattern: customTagPostCD.TagPattern, + CounterX: customTagPostCD.AutoIncreasingNumber, + Enabled: customTagPostCD.Enabled, + } + customTagStage = repository5.PIPELINE_STAGE_TYPE_POST_CD + customTagEnabled = customTagPostCD.Enabled + } pipeline := &bean.CDPipelineConfigObject{ Id: dbPipeline.Id, Name: dbPipeline.Name, @@ -945,6 +973,9 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelinesForApp(appId int) (cdPipe IsVirtualEnvironment: dbPipeline.IsVirtualEnvironment, PreDeployStage: dbPipeline.PreDeployStage, PostDeployStage: dbPipeline.PostDeployStage, + CustomTagObject: customTag, + CustomTagStage: &customTagStage, + EnableCustomTag: customTagEnabled, } pipelines = append(pipelines, pipeline) } From 53532cfb0c4c0d3e0f24b80a7e2d76da0ec70086 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Fri, 27 Oct 2023 14:18:29 +0530 Subject: [PATCH 073/143] added rollback API V2 in --- .../app/DeploymentPipelineRestHandler.go | 9 +- .../pipelineConfig/CdWorfkflowRepository.go | 34 +++++- pkg/bean/app.go | 1 + pkg/pipeline/AppArtifactManager.go | 112 ++++++++++++++++++ 4 files changed, 153 insertions(+), 3 deletions(-) diff --git a/api/restHandler/app/DeploymentPipelineRestHandler.go b/api/restHandler/app/DeploymentPipelineRestHandler.go index 8ce8a5f2da..511d243131 100644 --- a/api/restHandler/app/DeploymentPipelineRestHandler.go 
+++ b/api/restHandler/app/DeploymentPipelineRestHandler.go @@ -1451,8 +1451,8 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsForRollback(w http.Resp common.WriteJsonResp(w, err, "invalid size", http.StatusBadRequest) return } + searchString := r.URL.Query().Get("search") - searchString := r.URL.Query().Get("searchString") //rbac block starts from here object := handler.enforcerUtil.GetAppRBACName(app.AppName) if ok := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionGet, object); !ok { @@ -1466,9 +1466,14 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsForRollback(w http.Resp } //rbac block ends here //rbac for edit tags access + var ciArtifactResponse bean.CiArtifactResponse triggerAccess := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionTrigger, object) + if handler.pipelineRestHandlerEnvConfig.UseArtifactListApiV2 { + ciArtifactResponse, err = handler.pipelineBuilder.FetchArtifactForRollbackV2(cdPipelineId, app.Id, offset, limit, searchString, app, deploymentPipeline) + } else { + ciArtifactResponse, err = handler.pipelineBuilder.FetchArtifactForRollback(cdPipelineId, app.Id, offset, limit, searchString) + } - ciArtifactResponse, err := handler.pipelineBuilder.FetchArtifactForRollback(cdPipelineId, app.Id, offset, limit, searchString) if err != nil { handler.Logger.Errorw("service err, GetArtifactsForRollback", "err", err, "cdPipelineId", cdPipelineId) common.WriteJsonResp(w, err, "unable to fetch artifacts", http.StatusInternalServerError) diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 1d948f630b..4a9337efff 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -70,6 +70,7 @@ type CdWorkflowRepository interface { ExistsByStatus(status string) (bool, error) 
FetchArtifactsByCdPipelineId(pipelineId int, runnerType bean.WorkflowType, offset, limit int, searchString string) ([]CdWorkflowRunner, error) + FetchArtifactsByCdPipelineIdV2(listingFilterOptions bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, int, error) GetLatestTriggersOfHelmPipelinesStuckInNonTerminalStatuses(getPipelineDeployedWithinHours int) ([]*CdWorkflowRunner, error) } @@ -608,12 +609,13 @@ func (impl *CdWorkflowRepositoryImpl) ExistsByStatus(status string) (bool, error func (impl *CdWorkflowRepositoryImpl) FetchArtifactsByCdPipelineId(pipelineId int, runnerType bean.WorkflowType, offset, limit int, searchString string) ([]CdWorkflowRunner, error) { var wfrList []CdWorkflowRunner + searchStringFinal := "%" + searchString + "%" err := impl.dbConnection. Model(&wfrList). Column("cd_workflow_runner.*", "CdWorkflow", "CdWorkflow.Pipeline", "CdWorkflow.CiArtifact"). Where("cd_workflow.pipeline_id = ?", pipelineId). Where("cd_workflow_runner.workflow_type = ?", runnerType). - Where("ci_artifact.image ILIKE %?%", searchString). + Where("cd_workflow__ci_artifact.image LIKE ?", searchStringFinal). Order("cd_workflow_runner.id DESC"). Limit(limit).Offset(offset). Select() @@ -624,6 +626,36 @@ func (impl *CdWorkflowRepositoryImpl) FetchArtifactsByCdPipelineId(pipelineId in return wfrList, err } +func (impl *CdWorkflowRepositoryImpl) FetchArtifactsByCdPipelineIdV2(listingFilterOptions bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, int, error) { + var wfrList []CdWorkflowRunner + query := impl.dbConnection. + Model(&wfrList). + Column("cd_workflow_runner.*", "CdWorkflow", "CdWorkflow.Pipeline", "CdWorkflow.CiArtifact"). + Where("cd_workflow.pipeline_id = ?", listingFilterOptions.PipelineId). + Where("cd_workflow_runner.workflow_type = ?", listingFilterOptions.StageType). 
+ Where("cd_workflow__ci_artifact.image LIKE ?", listingFilterOptions.SearchString) + + if len(listingFilterOptions.ExcludeArtifactIds) > 0 { + query = query.Where("cd_workflow__ci_artifact.id NOT IN (?)", pg.In(listingFilterOptions.ExcludeArtifactIds)) + } + totalCount, err := query.Count() + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting Wfrs count and ci artifacts by pipelineId", "err", err, "pipelineId", listingFilterOptions.PipelineId) + return nil, totalCount, err + } + + query = query.Order("cd_workflow_runner.id DESC"). + Limit(listingFilterOptions.Limit). + Offset(listingFilterOptions.Offset) + + err = query.Select() + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting Wfrs and ci artifacts by pipelineId", "err", err, "pipelineId", listingFilterOptions.PipelineId) + return nil, totalCount, err + } + return wfrList, totalCount, nil +} + func (impl *CdWorkflowRepositoryImpl) GetLatestTriggersOfHelmPipelinesStuckInNonTerminalStatuses(getPipelineDeployedWithinHours int) ([]*CdWorkflowRunner, error) { var wfrList []*CdWorkflowRunner err := impl.dbConnection. 
diff --git a/pkg/bean/app.go b/pkg/bean/app.go index fffdb5d9b0..4924609dd9 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -735,6 +735,7 @@ type CiArtifactResponse struct { TagsEditable bool `json:"tagsEditable"` AppReleaseTagNames []string `json:"appReleaseTagNames"` //unique list of tags exists in the app HideImageTaggingHardDelete bool `json:"hideImageTaggingHardDelete"` + TotalCount int `json:"totalCount"` } type AppLabelsDto struct { diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 11b9e4b055..ab297faee1 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -40,6 +40,8 @@ type AppArtifactManager interface { //FetchArtifactForRollback : FetchArtifactForRollback(cdPipelineId, appId, offset, limit int, searchString string) (bean2.CiArtifactResponse, error) + FetchArtifactForRollbackV2(cdPipelineId, appId, offset, limit int, searchString string, app *bean2.CreateAppDTO, deploymentPipeline *pipelineConfig.Pipeline) (bean2.CiArtifactResponse, error) + BuildArtifactsForCdStage(pipelineId int, stageType bean.WorkflowType, ciArtifacts []bean2.CiArtifactBean, artifactMap map[int]int, parent bool, limit int, parentCdId int) ([]bean2.CiArtifactBean, map[int]int, int, string, error) BuildArtifactsForParentStage(cdPipelineId int, parentId int, parentType bean.WorkflowType, ciArtifacts []bean2.CiArtifactBean, artifactMap map[int]int, limit int, parentCdId int) ([]bean2.CiArtifactBean, error) @@ -264,6 +266,116 @@ func (impl *AppArtifactManagerImpl) FetchArtifactForRollback(cdPipelineId, appId return deployedCiArtifactsResponse, nil } +func (impl *AppArtifactManagerImpl) FetchArtifactForRollbackV2(cdPipelineId, appId, offset, limit int, searchString string, app *bean2.CreateAppDTO, deploymentPipeline *pipelineConfig.Pipeline) (bean2.CiArtifactResponse, error) { + var deployedCiArtifactsResponse bean2.CiArtifactResponse + imageTagsDataMap, err := 
impl.imageTaggingService.GetTagsDataMapByAppId(appId) + if err != nil { + impl.logger.Errorw("error in getting image tagging data with appId", "err", err, "appId", appId) + return deployedCiArtifactsResponse, err + } + + artifactListingFilterOpts := bean.ArtifactsListFilterOptions{} + artifactListingFilterOpts.PipelineId = cdPipelineId + artifactListingFilterOpts.StageType = bean.CD_WORKFLOW_TYPE_DEPLOY + artifactListingFilterOpts.SearchString = "%" + searchString + "%" + artifactListingFilterOpts.Limit = limit + artifactListingFilterOpts.Offset = offset + deployedCiArtifacts, artifactIds, totalCount, err := impl.BuildRollbackArtifactsList(artifactListingFilterOpts) + if err != nil { + impl.logger.Errorw("error in building ci artifacts for rollback", "err", err, "cdPipelineId", cdPipelineId) + return deployedCiArtifactsResponse, err + } + + imageCommentsDataMap, err := impl.imageTaggingService.GetImageCommentsDataMapByArtifactIds(artifactIds) + if err != nil { + impl.logger.Errorw("error in getting GetImageCommentsDataMapByArtifactIds", "err", err, "appId", appId, "artifactIds", artifactIds) + return deployedCiArtifactsResponse, err + } + + for i, _ := range deployedCiArtifacts { + imageTaggingResp := imageTagsDataMap[deployedCiArtifacts[i].Id] + if imageTaggingResp != nil { + deployedCiArtifacts[i].ImageReleaseTags = imageTaggingResp + } + if imageCommentResp := imageCommentsDataMap[deployedCiArtifacts[i].Id]; imageCommentResp != nil { + deployedCiArtifacts[i].ImageComment = imageCommentResp + } + } + + deployedCiArtifactsResponse.CdPipelineId = cdPipelineId + if deployedCiArtifacts == nil { + deployedCiArtifacts = []bean2.CiArtifactBean{} + } + deployedCiArtifactsResponse.CiArtifacts = deployedCiArtifacts + deployedCiArtifactsResponse.TotalCount = totalCount + return deployedCiArtifactsResponse, nil +} + +func (impl *AppArtifactManagerImpl) BuildRollbackArtifactsList(artifactListingFilterOpts bean.ArtifactsListFilterOptions) ([]bean2.CiArtifactBean, []int, int, 
error) { + var deployedCiArtifacts []bean2.CiArtifactBean + + //1)get current deployed artifact on this pipeline + latestWf, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(artifactListingFilterOpts.PipelineId, artifactListingFilterOpts.StageType, 1) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting latest workflow by pipelineId", "pipelineId", artifactListingFilterOpts.PipelineId, "currentStageType", artifactListingFilterOpts.StageType) + return deployedCiArtifacts, nil, 0, err + } + if len(latestWf) > 0 { + //we should never show current deployed artifact in rollback API + artifactListingFilterOpts.ExcludeArtifactIds = []int{latestWf[0].CdWorkflow.CiArtifactId} + } + + cdWfrs, totalCount, err := impl.cdWorkflowRepository.FetchArtifactsByCdPipelineIdV2(artifactListingFilterOpts) + + if err != nil { + impl.logger.Errorw("error in getting artifacts for rollback by cdPipelineId", "err", err, "cdPipelineId", artifactListingFilterOpts.PipelineId) + return deployedCiArtifacts, nil, totalCount, err + } + + var ids []int32 + for _, item := range cdWfrs { + ids = append(ids, item.TriggeredBy) + } + + userEmails := make(map[int32]string) + users, err := impl.userService.GetByIds(ids) + if err != nil { + impl.logger.Errorw("unable to fetch users by ids", "err", err, "ids", ids) + } + for _, item := range users { + userEmails[item.Id] = item.EmailId + } + + artifactIds := make([]int, 0) + + for _, cdWfr := range cdWfrs { + ciArtifact := &repository.CiArtifact{} + if cdWfr.CdWorkflow != nil && cdWfr.CdWorkflow.CiArtifact != nil { + ciArtifact = cdWfr.CdWorkflow.CiArtifact + } + if ciArtifact == nil { + continue + } + mInfo, err := parseMaterialInfo([]byte(ciArtifact.MaterialInfo), ciArtifact.DataSource) + if err != nil { + mInfo = []byte("[]") + impl.logger.Errorw("error in parsing ciArtifact material info", "err", err, "ciArtifact", ciArtifact) + } + userEmail := userEmails[cdWfr.TriggeredBy] + deployedCiArtifacts = 
append(deployedCiArtifacts, bean2.CiArtifactBean{ + Id: ciArtifact.Id, + Image: ciArtifact.Image, + MaterialInfo: mInfo, + DeployedTime: formatDate(cdWfr.StartedOn, bean2.LayoutRFC3339), + WfrId: cdWfr.Id, + DeployedBy: userEmail, + }) + artifactIds = append(artifactIds, ciArtifact.Id) + } + return deployedCiArtifacts, artifactIds, totalCount, nil + +} + func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipeline(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType) (*bean2.CiArtifactResponse, error) { // retrieve parent details From 43570707b172435b71a557dd0ddf753d84afc466 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Mon, 30 Oct 2023 13:17:18 +0530 Subject: [PATCH 074/143] added artifact createdOn time --- pkg/bean/app.go | 1 + pkg/pipeline/AppArtifactManager.go | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/bean/app.go b/pkg/bean/app.go index 4924609dd9..fa7f73f5e7 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -721,6 +721,7 @@ type CiArtifactBean struct { CiConfigureSourceValue string `json:"ciConfigureSourceValue"` ImageReleaseTags []*repository2.ImageTag `json:"imageReleaseTags"` ImageComment *repository2.ImageComment `json:"imageComment"` + CreatedTime string `json:"createdTime"` ExternalCiPipelineId int `json:"-"` ParentCiArtifact int `json:"-"` CiWorkflowId int `json:"-"` diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index ab297faee1..aba6fcd3c8 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -29,6 +29,7 @@ import ( lo "github.com/samber/lo" "go.uber.org/zap" "sort" + "strings" ) type AppArtifactManager interface { @@ -672,6 +673,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A Deployed: true, DeployedTime: formatDate(latestWf[0].CdWorkflow.CreatedOn, bean2.LayoutRFC3339), Latest: true, + CreatedTime: formatDate(currentRunningArtifact.CreatedOn, 
bean2.LayoutRFC3339), } } //2) get artifact list limited by filterOptions @@ -691,8 +693,12 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A //if no artifact deployed skip adding currentRunningArtifactBean in ciArtifacts arr if currentRunningArtifactBean != nil { - ciArtifacts = append(ciArtifacts, currentRunningArtifactBean) + searchString := listingFilterOpts.SearchString[1 : len(listingFilterOpts.SearchString)-1] + if strings.Contains(currentRunningArtifactBean.Image, searchString) { + ciArtifacts = append(ciArtifacts, currentRunningArtifactBean) + } } + return ciArtifacts, currentRunningArtifactId, currentRunningWorkflowStatus, nil } @@ -735,6 +741,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts RunningOnParentCd: wfr.CdWorkflow.CiArtifact.Id == artifactRunningOnParentCd, ExternalCiPipelineId: wfr.CdWorkflow.CiArtifact.ExternalCiPipelineId, ParentCiArtifact: wfr.CdWorkflow.CiArtifact.ParentCiArtifact, + CreatedTime: formatDate(wfr.CdWorkflow.CiArtifact.CreatedOn, bean2.LayoutRFC3339), } if wfr.CdWorkflow.CiArtifact.WorkflowId != nil { ciArtifact.CiWorkflowId = *wfr.CdWorkflow.CiArtifact.WorkflowId @@ -772,6 +779,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpt DeployedTime: formatDate(artifact.DeployedTime, bean2.LayoutRFC3339), ExternalCiPipelineId: artifact.ExternalCiPipelineId, ParentCiArtifact: artifact.ParentCiArtifact, + CreatedTime: formatDate(artifact.CreatedOn, bean2.LayoutRFC3339), } if artifact.WorkflowId != nil { ciArtifact.CiWorkflowId = *artifact.WorkflowId From 73bcb328d67c20de68009a012ca2df6d56c374c0 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Mon, 30 Oct 2023 14:29:40 +0530 Subject: [PATCH 075/143] pagination fix --- .../sql/repository/CiArtifactRepository.go | 63 ++++++++++++------- .../pipelineConfig/CdWorfkflowRepository.go | 36 +++++++---- pkg/pipeline/AppArtifactManager.go | 37 +++++------ 3 files changed, 81 insertions(+), 
55 deletions(-) diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 6cde743368..9dd9b13ee5 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -59,7 +59,7 @@ type CiArtifactRepository interface { GetArtifactParentCiAndWorkflowDetailsByIds(ids []int) ([]*CiArtifact, error) GetByWfId(wfId int) (artifact *CiArtifact, err error) GetArtifactsByCDPipeline(cdPipelineId, limit int, parentId int, parentType bean.WorkflowType) ([]*CiArtifact, error) - GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, error) + GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, int, error) GetLatestArtifactTimeByCiPipelineIds(ciPipelineIds []int) ([]*CiArtifact, error) GetLatestArtifactTimeByCiPipelineId(ciPipelineId int) (*CiArtifact, error) GetArtifactsByCDPipelineV2(cdPipelineId int) ([]CiArtifact, error) @@ -240,48 +240,63 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipeline(cdPipelineId, limi return artifactsAll, err } -func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, error) { - //TODO Gireesh: listingFilterOpts.SearchString should be conditional, +func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, int, error) { artifacts := make([]*CiArtifact, 0, listingFilterOpts.Limit) - commonPaginationQueryPart := " cia.image LIKE ?" + - " ORDER BY cia.id DESC" + - " LIMIT ?" + - " OFFSET ?;" + totalCount := 0 + commonPaginatedQueryPart := " cia.image LIKE ?" + orderByClause := " ORDER BY cia.id DESC" + limitOffsetQueryPart := + " LIMIT ? 
OFFSET ?;" if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE { - //TODO Gireesh: listingFilterOpts.PipelineId is ciPipelineId in this case why are we taking join - query := " SELECT cia.* " + - " FROM ci_artifact cia" + + selectQuery := " SELECT cia.* " + remainingQuery := " FROM ci_artifact cia" + " INNER JOIN ci_pipeline cp ON cp.id=cia.pipeline_id" + " INNER JOIN pipeline p ON p.ci_pipeline_id = cp.id and p.id=?" + " WHERE " if len(listingFilterOpts.ExcludeArtifactIds) > 0 { - query += fmt.Sprintf(" cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) + remainingQuery += fmt.Sprintf(" cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) } - query += commonPaginationQueryPart - _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.PipelineId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) + countQuery := " SELECT count(cia.id) " + totalCountQuery := countQuery + remainingQuery + commonPaginatedQueryPart + _, err := impl.dbConnection.Query(&totalCount, totalCountQuery, listingFilterOpts.PipelineId, listingFilterOpts.SearchString) if err != nil { - return artifacts, err + return artifacts, totalCount, err + } + + finalQuery := selectQuery + remainingQuery + commonPaginatedQueryPart + orderByClause + limitOffsetQueryPart + _, err = impl.dbConnection.Query(&artifacts, finalQuery, listingFilterOpts.PipelineId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) + if err != nil { + return artifacts, totalCount, err } } else if listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { - query := " SELECT cia.* " + - " FROM ci_artifact cia " + + selectQuery := " SELECT cia.* " + remainingQuery := " FROM ci_artifact cia " + " WHERE cia.external_ci_pipeline_id = ? 
AND " if len(listingFilterOpts.ExcludeArtifactIds) > 0 { - query += fmt.Sprintf(" cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) + remainingQuery += fmt.Sprintf(" cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) + } + + countQuery := " SELECT count(cia.id) " + totalCountQuery := countQuery + remainingQuery + commonPaginatedQueryPart + _, err := impl.dbConnection.Query(&totalCount, totalCountQuery, listingFilterOpts.PipelineId, listingFilterOpts.SearchString) + if err != nil { + return artifacts, totalCount, err } - query += commonPaginationQueryPart - _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.ParentId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) + + finalQuery := selectQuery + remainingQuery + commonPaginatedQueryPart + orderByClause + limitOffsetQueryPart + + _, err = impl.dbConnection.Query(&artifacts, finalQuery, listingFilterOpts.ParentId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) if err != nil { - return artifacts, err + return artifacts, totalCount, err } } else { - return artifacts, nil + return artifacts, totalCount, nil } if len(artifacts) == 0 { - return artifacts, nil + return artifacts, totalCount, nil } //processing artifactsMap := make(map[int]*CiArtifact) @@ -303,7 +318,7 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt _, err := impl.dbConnection.Query(&artifactsDeployed, query, pg.In(artifactsIds)) if err != nil { - return artifacts, nil + return artifacts, totalCount, nil } //set deployed time and latest deployed artifact @@ -317,7 +332,7 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt //TODO Gireesh: create separate meaningful functions of these queries - return artifacts, nil + return artifacts, totalCount, nil } diff --git 
a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 4a9337efff..18e56c0629 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -41,7 +41,7 @@ type CdWorkflowRepository interface { FindCdWorkflowMetaByEnvironmentId(appId int, environmentId int, offset int, size int) ([]CdWorkflowRunner, error) FindCdWorkflowMetaByPipelineId(pipelineId int, offset int, size int) ([]CdWorkflowRunner, error) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) - FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, error) + FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, int, error) SaveWorkFlowRunner(wfr *CdWorkflowRunner) (*CdWorkflowRunner, error) UpdateWorkFlowRunner(wfr *CdWorkflowRunner) error UpdateWorkFlowRunnersWithTxn(wfrs []*CdWorkflowRunner, tx *pg.Tx) error @@ -378,12 +378,11 @@ func (impl *CdWorkflowRepositoryImpl) FindCdWorkflowMetaByPipelineId(pipelineId } return wfrList, err } -func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, error) { +func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, int, error) { var wfrList []CdWorkflowRunner - var wfIds []int //TODO Gireesh: why are we extracting artifacts which belongs to current pipeline as it will impact page size of response ?? - query := impl.dbConnection.Model(&wfIds). - Column("MAX(cd_workflow_runner.id) AS id"). + query := impl.dbConnection.Model(&wfrList). + ColumnExpr("MAX(cd_workflow_runner.id) AS id"). Join("INNER JOIN cd_workflow ON cd_workflow.id=cd_workflow_runner.cd_workflow_id"). 
Join("INNER JOIN ci_artifact cia ON cia.id = cd_workflow.ci_artifact_id"). Where("(cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ?) "+ @@ -397,26 +396,37 @@ func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOpti if len(listingFilterOptions.ExcludeArtifactIds) > 0 { query = query.Where("cd_workflow.ci_artifact_id NOT IN (?)", pg.In(listingFilterOptions.ExcludeArtifactIds)) } + + query = query.Group("cd_workflow.ci_artifact_id") + totalCount, err := query.Count() + if err == pg.ErrNoRows { + return wfrList, totalCount, err + } + query = query. - Group("cd_workflow.ci_artifact_id"). Limit(listingFilterOptions.Limit). Offset(listingFilterOptions.Offset) - err := query.Select() - - if err == pg.ErrNoRows || len(wfIds) == 0 { - return wfrList, nil + err = query.Select() + if err == pg.ErrNoRows || len(wfrList) == 0 { + return wfrList, totalCount, nil + } + wfIds := make([]int, len(wfrList)) + for i, wf := range wfrList { + wfIds[i] = wf.Id } + wfrList = make([]CdWorkflowRunner, 0) + err = impl.dbConnection. Model(&wfrList). Column("cd_workflow_runner.*", "CdWorkflow", "CdWorkflow.Pipeline", "CdWorkflow.CiArtifact"). - Where("cd_workflow_runner IN (?) ", pg.In(wfIds)). + Where("cd_workflow_runner.id IN (?) ", pg.In(wfIds)). 
Select() if err == pg.ErrNoRows { - return wfrList, nil + return wfrList, totalCount, nil } - return wfrList, err + return wfrList, totalCount, err } func (impl *CdWorkflowRepositoryImpl) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) { diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index aba6fcd3c8..8ce9097c10 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -555,7 +555,7 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi artifactListingFilterOpts.ParentStageType = parentType artifactListingFilterOpts.StageType = stage artifactListingFilterOpts.SearchString = "%" + artifactListingFilterOpts.SearchString + "%" - ciArtifactsRefs, latestWfArtifactId, latestWfArtifactStatus, err := impl.BuildArtifactsList(artifactListingFilterOpts) + ciArtifactsRefs, latestWfArtifactId, latestWfArtifactStatus, totalCount, err := impl.BuildArtifactsList(artifactListingFilterOpts) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting artifacts for child cd stage", "err", err, "stage", stage) return nil, err @@ -625,6 +625,7 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi } ciArtifactsResponse.CdPipelineId = pipeline.Id + ciArtifactsResponse.TotalCount = totalCount ciArtifactsResponse.LatestWfArtifactId = latestWfArtifactId ciArtifactsResponse.LatestWfArtifactStatus = latestWfArtifactStatus if ciArtifacts == nil { @@ -634,15 +635,15 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi return ciArtifactsResponse, nil } -func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, int, string, error) { +func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, int, 
string, int, error) { var ciArtifacts []*bean2.CiArtifactBean - + totalCount := 0 //1)get current deployed artifact on this pipeline latestWf, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(listingFilterOpts.PipelineId, listingFilterOpts.StageType, 1) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting latest workflow by pipelineId", "pipelineId", listingFilterOpts.PipelineId, "currentStageType", listingFilterOpts.StageType) - return ciArtifacts, 0, "", err + return ciArtifacts, 0, "", totalCount, err } var currentRunningArtifactBean *bean2.CiArtifactBean @@ -678,16 +679,16 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A } //2) get artifact list limited by filterOptions if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE || listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { - ciArtifacts, err = impl.BuildArtifactsForCIParentV2(listingFilterOpts) + ciArtifacts, totalCount, err = impl.BuildArtifactsForCIParentV2(listingFilterOpts) if err != nil { impl.logger.Errorw("error in getting ci artifacts for ci/webhook type parent", "pipelineId", listingFilterOpts.PipelineId, "parentPipelineId", listingFilterOpts.ParentId, "parentStageType", listingFilterOpts.ParentStageType, "currentStageType", listingFilterOpts.StageType) - return ciArtifacts, 0, "", err + return ciArtifacts, 0, "", totalCount, err } } else { - ciArtifacts, err = impl.BuildArtifactsForCdStageV2(listingFilterOpts) + ciArtifacts, totalCount, err = impl.BuildArtifactsForCdStageV2(listingFilterOpts) if err != nil { impl.logger.Errorw("error in getting ci artifacts for ci/webhook type parent", "pipelineId", listingFilterOpts.PipelineId, "parentPipelineId", listingFilterOpts.ParentId, "parentStageType", listingFilterOpts.ParentStageType, "currentStageType", listingFilterOpts.StageType) - return ciArtifacts, 0, "", err + return ciArtifacts, 0, "", totalCount, err } } @@ -699,14 +700,14 @@ func (impl 
*AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A } } - return ciArtifacts, currentRunningArtifactId, currentRunningWorkflowStatus, nil + return ciArtifacts, currentRunningArtifactId, currentRunningWorkflowStatus, totalCount, nil } -func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, error) { - cdWfrList, err := impl.cdWorkflowRepository.FindArtifactByListFilter(listingFilterOpts) +func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, int, error) { + cdWfrList, totalCount, err := impl.cdWorkflowRepository.FindArtifactByListFilter(listingFilterOpts) if err != nil { impl.logger.Errorw("error in fetching cd workflow runners using filter", "filterOptions", listingFilterOpts, "err", err) - return nil, err + return nil, totalCount, err } //TODO Gireesh: initialized array with size but are using append, not optimized solution ciArtifacts := make([]*bean2.CiArtifactBean, 0, len(cdWfrList)) @@ -718,7 +719,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts parentCdWfrList, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(listingFilterOpts.ParentCdId, bean.CD_WORKFLOW_TYPE_DEPLOY, 1) if err != nil || len(parentCdWfrList) == 0 { impl.logger.Errorw("error in getting artifact for parent cd", "parentCdPipelineId", listingFilterOpts.ParentCdId) - return ciArtifacts, err + return ciArtifacts, totalCount, err } artifactRunningOnParentCd = parentCdWfrList[0].CdWorkflow.CiArtifact.Id } @@ -749,15 +750,15 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts ciArtifacts = append(ciArtifacts, ciArtifact) } - return ciArtifacts, nil + return ciArtifacts, totalCount, nil } -func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpts *bean.ArtifactsListFilterOptions) 
([]*bean2.CiArtifactBean, error) { +func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, int, error) { - artifacts, err := impl.ciArtifactRepository.GetArtifactsByCDPipelineV3(listingFilterOpts) + artifacts, totalCount, err := impl.ciArtifactRepository.GetArtifactsByCDPipelineV3(listingFilterOpts) if err != nil { impl.logger.Errorw("error in getting artifacts for ci", "err", err) - return nil, err + return nil, totalCount, err } //TODO Gireesh: if initialized then no need of using append, put value directly to index @@ -787,5 +788,5 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpt ciArtifacts = append(ciArtifacts, ciArtifact) } - return ciArtifacts, nil + return ciArtifacts, totalCount, nil } From dc61990876ad54c159e5f89894a20f634138a83e Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Mon, 30 Oct 2023 17:06:40 +0530 Subject: [PATCH 076/143] fix --- pkg/pipeline/AppArtifactManager.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 8ce9097c10..447242e989 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -676,6 +676,9 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A Latest: true, CreatedTime: formatDate(currentRunningArtifact.CreatedOn, bean2.LayoutRFC3339), } + if currentRunningArtifact.WorkflowId != nil { + currentRunningArtifactBean.CiWorkflowId = *currentRunningArtifact.WorkflowId + } } //2) get artifact list limited by filterOptions if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE || listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { From c9acbb97776034efda4704705e6ec954d4324028 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 30 Oct 2023 17:18:11 +0530 Subject: [PATCH 077/143] adding support for parent stage --- 
.../pipelineConfig/CdWorfkflowRepository.go | 60 ++++++++++--------- pkg/pipeline/AppArtifactManager.go | 30 +++++----- pkg/pipeline/CiCdPipelineOrchestrator.go | 2 +- pkg/pipeline/CustomTagService.go | 3 + pkg/pipeline/WorkflowDagExecutor.go | 4 +- pkg/plugin/GlobalPluginService.go | 3 + 6 files changed, 55 insertions(+), 47 deletions(-) diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index c1531040ca..0851118639 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -29,6 +29,7 @@ import ( "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" + "github.com/go-pg/pg/orm" "go.opentelemetry.io/otel" "go.uber.org/zap" "time" @@ -41,7 +42,7 @@ type CdWorkflowRepository interface { FindCdWorkflowMetaByEnvironmentId(appId int, environmentId int, offset int, size int) ([]CdWorkflowRunner, error) FindCdWorkflowMetaByPipelineId(pipelineId int, offset int, size int) ([]CdWorkflowRunner, error) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) - FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, error) + FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]repository.CiArtifact, error) SaveWorkFlowRunner(wfr *CdWorkflowRunner) (*CdWorkflowRunner, error) UpdateWorkFlowRunner(wfr *CdWorkflowRunner) error UpdateWorkFlowRunnersWithTxn(wfrs []*CdWorkflowRunner, tx *pg.Tx) error @@ -378,45 +379,46 @@ func (impl *CdWorkflowRepositoryImpl) FindCdWorkflowMetaByPipelineId(pipelineId } return wfrList, err } -func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) 
([]CdWorkflowRunner, error) { - var wfrList []CdWorkflowRunner - var wfIds []int +func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]repository.CiArtifact, error) { + + var ciArtifacts []repository.CiArtifact //TODO Gireesh: why are we extracting artifacts which belongs to current pipeline as it will impact page size of response ?? - query := impl.dbConnection.Model(&wfIds). - Column("MAX(cd_workflow_runner.id) AS id"). - Join("INNER JOIN cd_workflow ON cd_workflow.id=cd_workflow_runner.cd_workflow_id"). - Join("INNER JOIN ci_artifact cia ON cia.id = cd_workflow.ci_artifact_id"). - Where("(cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ?) "+ - "OR (cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ? AND cd_workflow_runner.status IN (?))", - listingFilterOptions.PipelineId, - listingFilterOptions.StageType, - listingFilterOptions.ParentId, - listingFilterOptions.ParentStageType, - pg.In([]string{application.Healthy, application.SUCCEEDED})). - Where("cia.image LIKE ?", listingFilterOptions.SearchString) + query := impl.dbConnection.Model(&ciArtifacts). + Column("ci_artifact.*"). + Join("LEFT JOIN cd_workflow ON ci_artifact.id = cd_workflow.ci_artifact_id"). + Join("INNER JOIN cd_workflow_runner ON cd_workflow_runner.cd_workflow_id=cd_workflow.id"). + WhereGroup(func(q *orm.Query) (*orm.Query, error) { + q = q.WhereGroup(func(sq *orm.Query) (*orm.Query, error) { + sq.Where("cd_workflow_runner.id IN (select MAX(cd_workflow_runner.id) OVER (PARTITION BY cd_workflow.ci_artifact_id) FROM cd_workflow_runner inner join cd_workflow on cd_workflow.id=cd_workflow_runner.cd_workflow_id) ") + sq.WhereGroup(func(ssq *orm.Query) (*orm.Query, error) { + ssq.WhereOr(" cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ? ", + listingFilterOptions.PipelineId, listingFilterOptions.StageType). + WhereOr("cd_workflow.pipeline_id = ? 
AND cd_workflow_runner.workflow_type = ? AND cd_workflow_runner.status IN (?)", + listingFilterOptions.ParentId, + listingFilterOptions.ParentStageType, + pg.In([]string{application.Healthy, application.SUCCEEDED})) + return ssq, nil + }) + return sq, nil + }) + q = q.WhereOr("ci_artifact.data_source=? AND ci_artifact.component_id=?", + listingFilterOptions.ParentStageType, + listingFilterOptions.ParentId) + return q, nil + }). + Where("ci_artifact.image LIKE ?", listingFilterOptions.SearchString) if len(listingFilterOptions.ExcludeArtifactIds) > 0 { query = query.Where("cd_workflow.ci_artifact_id NOT IN (?)", pg.In(listingFilterOptions.ExcludeArtifactIds)) } query = query. - Group("cd_workflow.ci_artifact_id"). Limit(listingFilterOptions.Limit). Offset(listingFilterOptions.Offset) err := query.Select() - - if err == pg.ErrNoRows || len(wfIds) == 0 { - return wfrList, nil - } - err = impl.dbConnection. - Model(&wfrList). - Column("cd_workflow_runner.*", "CdWorkflow", "CdWorkflow.Pipeline", "CdWorkflow.CiArtifact"). - Where("cd_workflow_runner IN (?) ", pg.In(wfIds)). 
- Select() - if err == pg.ErrNoRows { - return wfrList, nil + return ciArtifacts, nil } - return wfrList, err + return ciArtifacts, err } func (impl *CdWorkflowRepositoryImpl) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) { diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 11b9e4b055..2051d95ee1 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -585,13 +585,13 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A } func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, error) { - cdWfrList, err := impl.cdWorkflowRepository.FindArtifactByListFilter(listingFilterOpts) + cdArtifacts, err := impl.cdWorkflowRepository.FindArtifactByListFilter(listingFilterOpts) if err != nil { impl.logger.Errorw("error in fetching cd workflow runners using filter", "filterOptions", listingFilterOpts, "err", err) return nil, err } //TODO Gireesh: initialized array with size but are using append, not optimized solution - ciArtifacts := make([]*bean2.CiArtifactBean, 0, len(cdWfrList)) + ciArtifacts := make([]*bean2.CiArtifactBean, 0, len(cdArtifacts)) //get artifact running on parent cd artifactRunningOnParentCd := 0 @@ -605,27 +605,27 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts artifactRunningOnParentCd = parentCdWfrList[0].CdWorkflow.CiArtifact.Id } - for _, wfr := range cdWfrList { + for _, artifact := range cdArtifacts { //TODO Gireesh: Refactoring needed - mInfo, err := parseMaterialInfo([]byte(wfr.CdWorkflow.CiArtifact.MaterialInfo), wfr.CdWorkflow.CiArtifact.DataSource) + mInfo, err := parseMaterialInfo([]byte(artifact.MaterialInfo), artifact.DataSource) if err != nil { mInfo = []byte("[]") impl.logger.Errorw("Error in parsing artifact material info", "err", err) } 
ciArtifact := &bean2.CiArtifactBean{ - Id: wfr.CdWorkflow.CiArtifact.Id, - Image: wfr.CdWorkflow.CiArtifact.Image, - ImageDigest: wfr.CdWorkflow.CiArtifact.ImageDigest, + Id: artifact.Id, + Image: artifact.Image, + ImageDigest: artifact.ImageDigest, MaterialInfo: mInfo, //TODO:LastSuccessfulTriggerOnParent - Scanned: wfr.CdWorkflow.CiArtifact.Scanned, - ScanEnabled: wfr.CdWorkflow.CiArtifact.ScanEnabled, - RunningOnParentCd: wfr.CdWorkflow.CiArtifact.Id == artifactRunningOnParentCd, - ExternalCiPipelineId: wfr.CdWorkflow.CiArtifact.ExternalCiPipelineId, - ParentCiArtifact: wfr.CdWorkflow.CiArtifact.ParentCiArtifact, - } - if wfr.CdWorkflow.CiArtifact.WorkflowId != nil { - ciArtifact.CiWorkflowId = *wfr.CdWorkflow.CiArtifact.WorkflowId + Scanned: artifact.Scanned, + ScanEnabled: artifact.ScanEnabled, + RunningOnParentCd: artifact.Id == artifactRunningOnParentCd, + ExternalCiPipelineId: artifact.ExternalCiPipelineId, + ParentCiArtifact: artifact.ParentCiArtifact, + } + if artifact.WorkflowId != nil { + ciArtifact.CiWorkflowId = *artifact.WorkflowId } ciArtifacts = append(ciArtifacts, ciArtifact) } diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index 1d96de20d1..7fa1ce2c20 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -774,7 +774,7 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCiConf(createRequest *bean.CiConf } //If customTagObejct has been passed, save it - if ciPipeline.CustomTagObject != nil { + if ciPipeline.CustomTagObject != nil && len(ciPipeline.CustomTagObject.TagPattern) != 0 { customTag := &bean4.CustomTag{ EntityKey: bean2.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipeline.Id), diff --git a/pkg/pipeline/CustomTagService.go b/pkg/pipeline/CustomTagService.go index 8403311649..196c1eba96 100644 --- a/pkg/pipeline/CustomTagService.go +++ b/pkg/pipeline/CustomTagService.go @@ -38,6 +38,9 @@ func (impl *CustomTagServiceImpl) 
DeactivateImagePathReservation(id int) error { } func (impl *CustomTagServiceImpl) CreateOrUpdateCustomTag(tag *bean.CustomTag) error { + if len(tag.TagPattern) == 0 && tag.Enabled { + return fmt.Errorf("tag pattern cannot be empty") + } if tag.Enabled { if err := validateTagPattern(tag.TagPattern); err != nil { return err diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index d5b0c43a1c..12f29eecd8 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -755,7 +755,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * // handling plugin specific logic skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) for _, step := range cdStageWorkflowRequest.PreCiSteps { - if step.RefPluginId == skopeoRefPluginId { + if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, bean3.EntityTypePreCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { @@ -888,7 +888,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor // handling plugin specific logic skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) for _, step := range cdStageWorkflowRequest.PostCiSteps { - if step.RefPluginId == skopeoRefPluginId { + if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, bean3.EntityTypePostCD, strconv.Itoa(pipeline.Id), 
cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { diff --git a/pkg/plugin/GlobalPluginService.go b/pkg/plugin/GlobalPluginService.go index 69a4047834..5ad015896b 100644 --- a/pkg/plugin/GlobalPluginService.go +++ b/pkg/plugin/GlobalPluginService.go @@ -317,5 +317,8 @@ func (impl *GlobalPluginServiceImpl) GetRefPluginIdByRefPluginName(pluginName st impl.logger.Errorw("error in fetching plugin metadata by name", "err", err) return 0, err } + if pluginMetadata == nil { + return 0, nil + } return pluginMetadata[0].Id, nil } From 0ba2890f966c0c4859c984ff32efa5d5e3a32697 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Mon, 30 Oct 2023 17:38:16 +0530 Subject: [PATCH 078/143] delete redundant ids --- pkg/pipeline/AppArtifactManager.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 447242e989..5282775f8b 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -26,7 +26,6 @@ import ( repository2 "github.com/devtron-labs/devtron/pkg/pipeline/repository" "github.com/devtron-labs/devtron/pkg/user" "github.com/go-pg/pg" - lo "github.com/samber/lo" "go.uber.org/zap" "sort" "strings" @@ -573,8 +572,8 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi } //TODO Gireesh: need to check this behaviour, can we use this instead of below loop ?? 
- artifactIds := lo.FlatMap(ciArtifacts, func(artifact bean2.CiArtifactBean, _ int) []int { return []int{artifact.Id} }) - //artifactIds := make([]int, 0, len(ciArtifacts)) + //artifactIds := lo.FlatMap(ciArtifacts, func(artifact bean2.CiArtifactBean, _ int) []int { return []int{artifact.Id} }) + artifactIds := make([]int, 0, len(ciArtifacts)) for _, artifact := range ciArtifacts { artifactIds = append(artifactIds, artifact.Id) } From b505dc7bbf9f3d50c6369ebaa7a358e8d0699f75 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 30 Oct 2023 19:32:14 +0530 Subject: [PATCH 079/143] adding registry type and registry name flag in api --- .../app/DeploymentPipelineRestHandler.go | 9 +- .../sql/repository/CiArtifactRepository.go | 63 +++-- .../pipelineConfig/CdWorfkflowRepository.go | 50 +++- pkg/bean/app.go | 7 + pkg/pipeline/AppArtifactManager.go | 251 ++++++++++++++---- pkg/pipeline/PipelineBuilder.go | 2 +- wire_gen.go | 2 +- 7 files changed, 304 insertions(+), 80 deletions(-) diff --git a/api/restHandler/app/DeploymentPipelineRestHandler.go b/api/restHandler/app/DeploymentPipelineRestHandler.go index 44b44cace4..4b4b920208 100644 --- a/api/restHandler/app/DeploymentPipelineRestHandler.go +++ b/api/restHandler/app/DeploymentPipelineRestHandler.go @@ -1462,8 +1462,8 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsForRollback(w http.Resp common.WriteJsonResp(w, err, "invalid size", http.StatusBadRequest) return } + searchString := r.URL.Query().Get("search") - searchString := r.URL.Query().Get("searchString") //rbac block starts from here object := handler.enforcerUtil.GetAppRBACName(app.AppName) if ok := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionGet, object); !ok { @@ -1477,9 +1477,14 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsForRollback(w http.Resp } //rbac block ends here //rbac for edit tags access + var ciArtifactResponse bean.CiArtifactResponse triggerAccess := handler.enforcer.Enforce(token, 
casbin.ResourceApplications, casbin.ActionTrigger, object) + if handler.pipelineRestHandlerEnvConfig.UseArtifactListApiV2 { + ciArtifactResponse, err = handler.pipelineBuilder.FetchArtifactForRollbackV2(cdPipelineId, app.Id, offset, limit, searchString, app, deploymentPipeline) + } else { + ciArtifactResponse, err = handler.pipelineBuilder.FetchArtifactForRollback(cdPipelineId, app.Id, offset, limit, searchString) + } - ciArtifactResponse, err := handler.pipelineBuilder.FetchArtifactForRollback(cdPipelineId, app.Id, offset, limit, searchString) if err != nil { handler.Logger.Errorw("service err, GetArtifactsForRollback", "err", err, "cdPipelineId", cdPipelineId) common.WriteJsonResp(w, err, "unable to fetch artifacts", http.StatusInternalServerError) diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 2d072973c6..cf0cdeb797 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -75,7 +75,7 @@ type CiArtifactRepository interface { GetArtifactParentCiAndWorkflowDetailsByIds(ids []int) ([]*CiArtifact, error) GetByWfId(wfId int) (artifact *CiArtifact, err error) GetArtifactsByCDPipeline(cdPipelineId, limit int, parentId int, parentType bean.WorkflowType) ([]*CiArtifact, error) - GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, error) + GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, int, error) GetLatestArtifactTimeByCiPipelineIds(ciPipelineIds []int) ([]*CiArtifact, error) GetLatestArtifactTimeByCiPipelineId(ciPipelineId int) (*CiArtifact, error) GetArtifactsByCDPipelineV2(cdPipelineId int) ([]CiArtifact, error) @@ -256,48 +256,63 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipeline(cdPipelineId, limi return artifactsAll, err } -func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpts 
*bean.ArtifactsListFilterOptions) ([]*CiArtifact, error) { - //TODO Gireesh: listingFilterOpts.SearchString should be conditional, +func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, int, error) { artifacts := make([]*CiArtifact, 0, listingFilterOpts.Limit) - commonPaginationQueryPart := " cia.image LIKE ?" + - " ORDER BY cia.id DESC" + - " LIMIT ?" + - " OFFSET ?;" + totalCount := 0 + commonPaginatedQueryPart := " cia.image LIKE ?" + orderByClause := " ORDER BY cia.id DESC" + limitOffsetQueryPart := + " LIMIT ? OFFSET ?;" if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE { - //TODO Gireesh: listingFilterOpts.PipelineId is ciPipelineId in this case why are we taking join - query := " SELECT cia.* " + - " FROM ci_artifact cia" + + selectQuery := " SELECT cia.* " + remainingQuery := " FROM ci_artifact cia" + " INNER JOIN ci_pipeline cp ON cp.id=cia.pipeline_id" + " INNER JOIN pipeline p ON p.ci_pipeline_id = cp.id and p.id=?" 
+ " WHERE " if len(listingFilterOpts.ExcludeArtifactIds) > 0 { - query += fmt.Sprintf(" cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) + remainingQuery += fmt.Sprintf(" cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) } - query += commonPaginationQueryPart - _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.PipelineId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) + countQuery := " SELECT count(cia.id) " + totalCountQuery := countQuery + remainingQuery + commonPaginatedQueryPart + _, err := impl.dbConnection.Query(&totalCount, totalCountQuery, listingFilterOpts.PipelineId, listingFilterOpts.SearchString) if err != nil { - return artifacts, err + return artifacts, totalCount, err + } + + finalQuery := selectQuery + remainingQuery + commonPaginatedQueryPart + orderByClause + limitOffsetQueryPart + _, err = impl.dbConnection.Query(&artifacts, finalQuery, listingFilterOpts.PipelineId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) + if err != nil { + return artifacts, totalCount, err } } else if listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { - query := " SELECT cia.* " + - " FROM ci_artifact cia " + + selectQuery := " SELECT cia.* " + remainingQuery := " FROM ci_artifact cia " + " WHERE cia.external_ci_pipeline_id = ? 
AND " if len(listingFilterOpts.ExcludeArtifactIds) > 0 { - query += fmt.Sprintf(" cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) + remainingQuery += fmt.Sprintf(" cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) + } + + countQuery := " SELECT count(cia.id) " + totalCountQuery := countQuery + remainingQuery + commonPaginatedQueryPart + _, err := impl.dbConnection.Query(&totalCount, totalCountQuery, listingFilterOpts.PipelineId, listingFilterOpts.SearchString) + if err != nil { + return artifacts, totalCount, err } - query += commonPaginationQueryPart - _, err := impl.dbConnection.Query(&artifacts, query, listingFilterOpts.ParentId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) + + finalQuery := selectQuery + remainingQuery + commonPaginatedQueryPart + orderByClause + limitOffsetQueryPart + + _, err = impl.dbConnection.Query(&artifacts, finalQuery, listingFilterOpts.ParentId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) if err != nil { - return artifacts, err + return artifacts, totalCount, err } } else { - return artifacts, nil + return artifacts, totalCount, nil } if len(artifacts) == 0 { - return artifacts, nil + return artifacts, totalCount, nil } //processing artifactsMap := make(map[int]*CiArtifact) @@ -319,7 +334,7 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt _, err := impl.dbConnection.Query(&artifactsDeployed, query, pg.In(artifactsIds)) if err != nil { - return artifacts, nil + return artifacts, totalCount, nil } //set deployed time and latest deployed artifact @@ -333,7 +348,7 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt //TODO Gireesh: create separate meaningful functions of these queries - return artifacts, nil + return artifacts, totalCount, nil } diff --git 
a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 0851118639..0e82607529 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -42,7 +42,7 @@ type CdWorkflowRepository interface { FindCdWorkflowMetaByEnvironmentId(appId int, environmentId int, offset int, size int) ([]CdWorkflowRunner, error) FindCdWorkflowMetaByPipelineId(pipelineId int, offset int, size int) ([]CdWorkflowRunner, error) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) - FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]repository.CiArtifact, error) + FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]repository.CiArtifact, int, error) SaveWorkFlowRunner(wfr *CdWorkflowRunner) (*CdWorkflowRunner, error) UpdateWorkFlowRunner(wfr *CdWorkflowRunner) error UpdateWorkFlowRunnersWithTxn(wfrs []*CdWorkflowRunner, tx *pg.Tx) error @@ -71,6 +71,7 @@ type CdWorkflowRepository interface { ExistsByStatus(status string) (bool, error) FetchArtifactsByCdPipelineId(pipelineId int, runnerType bean.WorkflowType, offset, limit int, searchString string) ([]CdWorkflowRunner, error) + FetchArtifactsByCdPipelineIdV2(listingFilterOptions bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, int, error) GetLatestTriggersOfHelmPipelinesStuckInNonTerminalStatuses(getPipelineDeployedWithinHours int) ([]*CdWorkflowRunner, error) } @@ -379,7 +380,7 @@ func (impl *CdWorkflowRepositoryImpl) FindCdWorkflowMetaByPipelineId(pipelineId } return wfrList, err } -func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]repository.CiArtifact, error) { +func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions 
*bean.ArtifactsListFilterOptions) ([]repository.CiArtifact, int, error) { var ciArtifacts []repository.CiArtifact //TODO Gireesh: why are we extracting artifacts which belongs to current pipeline as it will impact page size of response ?? @@ -410,15 +411,21 @@ func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOpti if len(listingFilterOptions.ExcludeArtifactIds) > 0 { query = query.Where("cd_workflow.ci_artifact_id NOT IN (?)", pg.In(listingFilterOptions.ExcludeArtifactIds)) } + + totalCount, err := query.Count() + if err == pg.ErrNoRows { + return ciArtifacts, totalCount, err + } + query = query. Limit(listingFilterOptions.Limit). Offset(listingFilterOptions.Offset) - err := query.Select() + err = query.Select() if err == pg.ErrNoRows { - return ciArtifacts, nil + return ciArtifacts, totalCount, nil } - return ciArtifacts, err + return ciArtifacts, totalCount, err } func (impl *CdWorkflowRepositoryImpl) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) { @@ -611,12 +618,13 @@ func (impl *CdWorkflowRepositoryImpl) ExistsByStatus(status string) (bool, error func (impl *CdWorkflowRepositoryImpl) FetchArtifactsByCdPipelineId(pipelineId int, runnerType bean.WorkflowType, offset, limit int, searchString string) ([]CdWorkflowRunner, error) { var wfrList []CdWorkflowRunner + searchStringFinal := "%" + searchString + "%" err := impl.dbConnection. Model(&wfrList). Column("cd_workflow_runner.*", "CdWorkflow", "CdWorkflow.Pipeline", "CdWorkflow.CiArtifact"). Where("cd_workflow.pipeline_id = ?", pipelineId). Where("cd_workflow_runner.workflow_type = ?", runnerType). - Where("ci_artifact.image ILIKE %?%", searchString). + Where("cd_workflow__ci_artifact.image LIKE ?", searchStringFinal). Order("cd_workflow_runner.id DESC"). Limit(limit).Offset(offset). 
Select() @@ -627,6 +635,36 @@ func (impl *CdWorkflowRepositoryImpl) FetchArtifactsByCdPipelineId(pipelineId in return wfrList, err } +func (impl *CdWorkflowRepositoryImpl) FetchArtifactsByCdPipelineIdV2(listingFilterOptions bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, int, error) { + var wfrList []CdWorkflowRunner + query := impl.dbConnection. + Model(&wfrList). + Column("cd_workflow_runner.*", "CdWorkflow", "CdWorkflow.Pipeline", "CdWorkflow.CiArtifact"). + Where("cd_workflow.pipeline_id = ?", listingFilterOptions.PipelineId). + Where("cd_workflow_runner.workflow_type = ?", listingFilterOptions.StageType). + Where("cd_workflow__ci_artifact.image LIKE ?", listingFilterOptions.SearchString) + + if len(listingFilterOptions.ExcludeArtifactIds) > 0 { + query = query.Where("cd_workflow__ci_artifact.id NOT IN (?)", pg.In(listingFilterOptions.ExcludeArtifactIds)) + } + totalCount, err := query.Count() + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting Wfrs count and ci artifacts by pipelineId", "err", err, "pipelineId", listingFilterOptions.PipelineId) + return nil, totalCount, err + } + + query = query.Order("cd_workflow_runner.id DESC"). + Limit(listingFilterOptions.Limit). + Offset(listingFilterOptions.Offset) + + err = query.Select() + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting Wfrs and ci artifacts by pipelineId", "err", err, "pipelineId", listingFilterOptions.PipelineId) + return nil, totalCount, err + } + return wfrList, totalCount, nil +} + func (impl *CdWorkflowRepositoryImpl) GetLatestTriggersOfHelmPipelinesStuckInNonTerminalStatuses(getPipelineDeployedWithinHours int) ([]*CdWorkflowRunner, error) { var wfrList []*CdWorkflowRunner err := impl.dbConnection. 
diff --git a/pkg/bean/app.go b/pkg/bean/app.go index a0278c294d..2a3f1b89e5 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -734,9 +734,15 @@ type CiArtifactBean struct { CiConfigureSourceValue string `json:"ciConfigureSourceValue"` ImageReleaseTags []*repository2.ImageTag `json:"imageReleaseTags"` ImageComment *repository2.ImageComment `json:"imageComment"` + CreatedTime string `json:"createdTime"` ExternalCiPipelineId int `json:"-"` ParentCiArtifact int `json:"-"` CiWorkflowId int `json:"-"` + RegistryType string `json:"registryType"` + RegistryName string `json:"registryName"` + CiPipelineId int `json:"-"` + CredentialsSourceType string `json:"containerRegistryId"` + CredentialsSourceValue string `json:"credentialsSourceValue"` } type CiArtifactResponse struct { @@ -748,6 +754,7 @@ type CiArtifactResponse struct { TagsEditable bool `json:"tagsEditable"` AppReleaseTagNames []string `json:"appReleaseTagNames"` //unique list of tags exists in the app HideImageTaggingHardDelete bool `json:"hideImageTaggingHardDelete"` + TotalCount int `json:"totalCount"` } type AppLabelsDto struct { diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 2051d95ee1..902f320a53 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -21,6 +21,7 @@ import ( "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/client/argocdServer/application" "github.com/devtron-labs/devtron/internal/sql/repository" + dockerArtifactStoreRegistry "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" bean2 "github.com/devtron-labs/devtron/pkg/bean" repository2 "github.com/devtron-labs/devtron/pkg/pipeline/repository" @@ -29,6 +30,7 @@ import ( lo "github.com/samber/lo" 
"go.uber.org/zap" "sort" + "strings" ) type AppArtifactManager interface { @@ -40,6 +42,8 @@ type AppArtifactManager interface { //FetchArtifactForRollback : FetchArtifactForRollback(cdPipelineId, appId, offset, limit int, searchString string) (bean2.CiArtifactResponse, error) + FetchArtifactForRollbackV2(cdPipelineId, appId, offset, limit int, searchString string, app *bean2.CreateAppDTO, deploymentPipeline *pipelineConfig.Pipeline) (bean2.CiArtifactResponse, error) + BuildArtifactsForCdStage(pipelineId int, stageType bean.WorkflowType, ciArtifacts []bean2.CiArtifactBean, artifactMap map[int]int, parent bool, limit int, parentCdId int) ([]bean2.CiArtifactBean, map[int]int, int, string, error) BuildArtifactsForParentStage(cdPipelineId int, parentId int, parentType bean.WorkflowType, ciArtifacts []bean2.CiArtifactBean, artifactMap map[int]int, limit int, parentCdId int) ([]bean2.CiArtifactBean, error) @@ -54,6 +58,8 @@ type AppArtifactManagerImpl struct { ciWorkflowRepository pipelineConfig.CiWorkflowRepository pipelineStageService PipelineStageService cdPipelineConfigService CdPipelineConfigService + dockerArtifactRegistry dockerArtifactStoreRegistry.DockerArtifactStoreRepository + CiPipelineRepository pipelineConfig.CiPipelineRepository } func NewAppArtifactManagerImpl( @@ -64,7 +70,9 @@ func NewAppArtifactManagerImpl( ciArtifactRepository repository.CiArtifactRepository, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, pipelineStageService PipelineStageService, - cdPipelineConfigService CdPipelineConfigService) *AppArtifactManagerImpl { + cdPipelineConfigService CdPipelineConfigService, + dockerArtifactRegistry dockerArtifactStoreRegistry.DockerArtifactStoreRepository, + CiPipelineRepository pipelineConfig.CiPipelineRepository) *AppArtifactManagerImpl { return &AppArtifactManagerImpl{ logger: logger, @@ -75,6 +83,8 @@ func NewAppArtifactManagerImpl( ciWorkflowRepository: ciWorkflowRepository, cdPipelineConfigService: cdPipelineConfigService, 
pipelineStageService: pipelineStageService, + dockerArtifactRegistry: dockerArtifactRegistry, + CiPipelineRepository: CiPipelineRepository, } } @@ -264,6 +274,116 @@ func (impl *AppArtifactManagerImpl) FetchArtifactForRollback(cdPipelineId, appId return deployedCiArtifactsResponse, nil } +func (impl *AppArtifactManagerImpl) FetchArtifactForRollbackV2(cdPipelineId, appId, offset, limit int, searchString string, app *bean2.CreateAppDTO, deploymentPipeline *pipelineConfig.Pipeline) (bean2.CiArtifactResponse, error) { + var deployedCiArtifactsResponse bean2.CiArtifactResponse + imageTagsDataMap, err := impl.imageTaggingService.GetTagsDataMapByAppId(appId) + if err != nil { + impl.logger.Errorw("error in getting image tagging data with appId", "err", err, "appId", appId) + return deployedCiArtifactsResponse, err + } + + artifactListingFilterOpts := bean.ArtifactsListFilterOptions{} + artifactListingFilterOpts.PipelineId = cdPipelineId + artifactListingFilterOpts.StageType = bean.CD_WORKFLOW_TYPE_DEPLOY + artifactListingFilterOpts.SearchString = "%" + searchString + "%" + artifactListingFilterOpts.Limit = limit + artifactListingFilterOpts.Offset = offset + deployedCiArtifacts, artifactIds, totalCount, err := impl.BuildRollbackArtifactsList(artifactListingFilterOpts) + if err != nil { + impl.logger.Errorw("error in building ci artifacts for rollback", "err", err, "cdPipelineId", cdPipelineId) + return deployedCiArtifactsResponse, err + } + + imageCommentsDataMap, err := impl.imageTaggingService.GetImageCommentsDataMapByArtifactIds(artifactIds) + if err != nil { + impl.logger.Errorw("error in getting GetImageCommentsDataMapByArtifactIds", "err", err, "appId", appId, "artifactIds", artifactIds) + return deployedCiArtifactsResponse, err + } + + for i, _ := range deployedCiArtifacts { + imageTaggingResp := imageTagsDataMap[deployedCiArtifacts[i].Id] + if imageTaggingResp != nil { + deployedCiArtifacts[i].ImageReleaseTags = imageTaggingResp + } + if imageCommentResp := 
imageCommentsDataMap[deployedCiArtifacts[i].Id]; imageCommentResp != nil { + deployedCiArtifacts[i].ImageComment = imageCommentResp + } + } + + deployedCiArtifactsResponse.CdPipelineId = cdPipelineId + if deployedCiArtifacts == nil { + deployedCiArtifacts = []bean2.CiArtifactBean{} + } + deployedCiArtifactsResponse.CiArtifacts = deployedCiArtifacts + deployedCiArtifactsResponse.TotalCount = totalCount + return deployedCiArtifactsResponse, nil +} + +func (impl *AppArtifactManagerImpl) BuildRollbackArtifactsList(artifactListingFilterOpts bean.ArtifactsListFilterOptions) ([]bean2.CiArtifactBean, []int, int, error) { + var deployedCiArtifacts []bean2.CiArtifactBean + + //1)get current deployed artifact on this pipeline + latestWf, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(artifactListingFilterOpts.PipelineId, artifactListingFilterOpts.StageType, 1) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting latest workflow by pipelineId", "pipelineId", artifactListingFilterOpts.PipelineId, "currentStageType", artifactListingFilterOpts.StageType) + return deployedCiArtifacts, nil, 0, err + } + if len(latestWf) > 0 { + //we should never show current deployed artifact in rollback API + artifactListingFilterOpts.ExcludeArtifactIds = []int{latestWf[0].CdWorkflow.CiArtifactId} + } + + cdWfrs, totalCount, err := impl.cdWorkflowRepository.FetchArtifactsByCdPipelineIdV2(artifactListingFilterOpts) + + if err != nil { + impl.logger.Errorw("error in getting artifacts for rollback by cdPipelineId", "err", err, "cdPipelineId", artifactListingFilterOpts.PipelineId) + return deployedCiArtifacts, nil, totalCount, err + } + + var ids []int32 + for _, item := range cdWfrs { + ids = append(ids, item.TriggeredBy) + } + + userEmails := make(map[int32]string) + users, err := impl.userService.GetByIds(ids) + if err != nil { + impl.logger.Errorw("unable to fetch users by ids", "err", err, "ids", ids) + } + for _, item := range users { + 
userEmails[item.Id] = item.EmailId + } + + artifactIds := make([]int, 0) + + for _, cdWfr := range cdWfrs { + ciArtifact := &repository.CiArtifact{} + if cdWfr.CdWorkflow != nil && cdWfr.CdWorkflow.CiArtifact != nil { + ciArtifact = cdWfr.CdWorkflow.CiArtifact + } + if ciArtifact == nil { + continue + } + mInfo, err := parseMaterialInfo([]byte(ciArtifact.MaterialInfo), ciArtifact.DataSource) + if err != nil { + mInfo = []byte("[]") + impl.logger.Errorw("error in parsing ciArtifact material info", "err", err, "ciArtifact", ciArtifact) + } + userEmail := userEmails[cdWfr.TriggeredBy] + deployedCiArtifacts = append(deployedCiArtifacts, bean2.CiArtifactBean{ + Id: ciArtifact.Id, + Image: ciArtifact.Image, + MaterialInfo: mInfo, + DeployedTime: formatDate(cdWfr.StartedOn, bean2.LayoutRFC3339), + WfrId: cdWfr.Id, + DeployedBy: userEmail, + }) + artifactIds = append(artifactIds, ciArtifact.Id) + } + return deployedCiArtifacts, artifactIds, totalCount, nil + +} + func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipeline(pipeline *pipelineConfig.Pipeline, stage bean.WorkflowType) (*bean2.CiArtifactResponse, error) { // retrieve parent details @@ -442,7 +562,7 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi artifactListingFilterOpts.ParentStageType = parentType artifactListingFilterOpts.StageType = stage artifactListingFilterOpts.SearchString = "%" + artifactListingFilterOpts.SearchString + "%" - ciArtifactsRefs, latestWfArtifactId, latestWfArtifactStatus, err := impl.BuildArtifactsList(artifactListingFilterOpts) + ciArtifactsRefs, latestWfArtifactId, latestWfArtifactStatus, totalCount, err := impl.BuildArtifactsList(artifactListingFilterOpts) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting artifacts for child cd stage", "err", err, "stage", stage) return nil, err @@ -491,7 +611,27 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi // if external webhook continue continue 
} - + var dockerRegistryId string + if artifact.CiPipelineId != 0 { + ciPipeline, err := impl.CiPipelineRepository.FindById(artifact.CiPipelineId) + if err != nil { + impl.logger.Errorw("error in fetching ciPipeline", "ciPipelineId", ciPipeline.Id, "error", err) + return nil, err + } + dockerRegistryId = *ciPipeline.CiTemplate.DockerRegistryId + } else { + if artifact.CredentialsSourceType == repository.GLOBAL_CONTAINER_REGISTRY { + dockerRegistryId = artifact.CredentialsSourceValue + } + } + if len(dockerRegistryId) > 0 { + dockerArtifact, err := impl.dockerArtifactRegistry.FindOne(dockerRegistryId) + if err != nil { + impl.logger.Errorw("error in getting docker registry details", "err", err, "dockerArtifactStoreId", dockerRegistryId) + } + ciArtifacts[i].RegistryType = string(dockerArtifact.RegistryType) + ciArtifacts[i].RegistryName = dockerRegistryId + } //TODO: can be optimised var ciWorkflow *pipelineConfig.CiWorkflow if artifact.ParentCiArtifact != 0 { @@ -500,7 +640,6 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi impl.logger.Errorw("error in getting ci_workflow for artifacts", "err", err, "artifact", artifact, "parentStage", parentType, "stage", stage) return ciArtifactsResponse, err } - } else { ciWorkflow, err = impl.ciWorkflowRepository.FindCiWorkflowGitTriggersById(artifact.CiWorkflowId) if err != nil { @@ -512,6 +651,7 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi } ciArtifactsResponse.CdPipelineId = pipeline.Id + ciArtifactsResponse.TotalCount = totalCount ciArtifactsResponse.LatestWfArtifactId = latestWfArtifactId ciArtifactsResponse.LatestWfArtifactStatus = latestWfArtifactStatus if ciArtifacts == nil { @@ -521,15 +661,15 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi return ciArtifactsResponse, nil } -func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, int, 
string, error) { +func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, int, string, int, error) { var ciArtifacts []*bean2.CiArtifactBean - + totalCount := 0 //1)get current deployed artifact on this pipeline latestWf, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(listingFilterOpts.PipelineId, listingFilterOpts.StageType, 1) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting latest workflow by pipelineId", "pipelineId", listingFilterOpts.PipelineId, "currentStageType", listingFilterOpts.StageType) - return ciArtifacts, 0, "", err + return ciArtifacts, 0, "", totalCount, err } var currentRunningArtifactBean *bean2.CiArtifactBean @@ -551,44 +691,55 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A impl.logger.Errorw("Error in parsing artifact material info", "err", err, "artifact", currentRunningArtifact) } currentRunningArtifactBean = &bean2.CiArtifactBean{ - Id: currentRunningArtifact.Id, - Image: currentRunningArtifact.Image, - ImageDigest: currentRunningArtifact.ImageDigest, - MaterialInfo: mInfo, - ScanEnabled: currentRunningArtifact.ScanEnabled, - Scanned: currentRunningArtifact.Scanned, - Deployed: true, - DeployedTime: formatDate(latestWf[0].CdWorkflow.CreatedOn, bean2.LayoutRFC3339), - Latest: true, + Id: currentRunningArtifact.Id, + Image: currentRunningArtifact.Image, + ImageDigest: currentRunningArtifact.ImageDigest, + MaterialInfo: mInfo, + ScanEnabled: currentRunningArtifact.ScanEnabled, + Scanned: currentRunningArtifact.Scanned, + Deployed: true, + DeployedTime: formatDate(latestWf[0].CdWorkflow.CreatedOn, bean2.LayoutRFC3339), + Latest: true, + CreatedTime: formatDate(currentRunningArtifact.CreatedOn, bean2.LayoutRFC3339), + CiPipelineId: currentRunningArtifact.PipelineId, + CredentialsSourceType: currentRunningArtifact.CredentialsSourceType, + CredentialsSourceValue: 
currentRunningArtifact.CredentialSourceValue, + } + if currentRunningArtifact.WorkflowId != nil { + currentRunningArtifactBean.CiWorkflowId = *currentRunningArtifact.WorkflowId } } //2) get artifact list limited by filterOptions if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE || listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { - ciArtifacts, err = impl.BuildArtifactsForCIParentV2(listingFilterOpts) + ciArtifacts, totalCount, err = impl.BuildArtifactsForCIParentV2(listingFilterOpts) if err != nil { impl.logger.Errorw("error in getting ci artifacts for ci/webhook type parent", "pipelineId", listingFilterOpts.PipelineId, "parentPipelineId", listingFilterOpts.ParentId, "parentStageType", listingFilterOpts.ParentStageType, "currentStageType", listingFilterOpts.StageType) - return ciArtifacts, 0, "", err + return ciArtifacts, 0, "", totalCount, err } } else { - ciArtifacts, err = impl.BuildArtifactsForCdStageV2(listingFilterOpts) + ciArtifacts, totalCount, err = impl.BuildArtifactsForCdStageV2(listingFilterOpts) if err != nil { impl.logger.Errorw("error in getting ci artifacts for ci/webhook type parent", "pipelineId", listingFilterOpts.PipelineId, "parentPipelineId", listingFilterOpts.ParentId, "parentStageType", listingFilterOpts.ParentStageType, "currentStageType", listingFilterOpts.StageType) - return ciArtifacts, 0, "", err + return ciArtifacts, 0, "", totalCount, err } } //if no artifact deployed skip adding currentRunningArtifactBean in ciArtifacts arr if currentRunningArtifactBean != nil { - ciArtifacts = append(ciArtifacts, currentRunningArtifactBean) + searchString := listingFilterOpts.SearchString[1 : len(listingFilterOpts.SearchString)-1] + if strings.Contains(currentRunningArtifactBean.Image, searchString) { + ciArtifacts = append(ciArtifacts, currentRunningArtifactBean) + } } - return ciArtifacts, currentRunningArtifactId, currentRunningWorkflowStatus, nil + + return ciArtifacts, currentRunningArtifactId, 
currentRunningWorkflowStatus, totalCount, nil } -func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, error) { - cdArtifacts, err := impl.cdWorkflowRepository.FindArtifactByListFilter(listingFilterOpts) +func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, int, error) { + cdArtifacts, totalCount, err := impl.cdWorkflowRepository.FindArtifactByListFilter(listingFilterOpts) if err != nil { impl.logger.Errorw("error in fetching cd workflow runners using filter", "filterOptions", listingFilterOpts, "err", err) - return nil, err + return nil, totalCount, err } //TODO Gireesh: initialized array with size but are using append, not optimized solution ciArtifacts := make([]*bean2.CiArtifactBean, 0, len(cdArtifacts)) @@ -600,7 +751,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts parentCdWfrList, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(listingFilterOpts.ParentCdId, bean.CD_WORKFLOW_TYPE_DEPLOY, 1) if err != nil || len(parentCdWfrList) == 0 { impl.logger.Errorw("error in getting artifact for parent cd", "parentCdPipelineId", listingFilterOpts.ParentCdId) - return ciArtifacts, err + return ciArtifacts, totalCount, err } artifactRunningOnParentCd = parentCdWfrList[0].CdWorkflow.CiArtifact.Id } @@ -618,11 +769,15 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts ImageDigest: artifact.ImageDigest, MaterialInfo: mInfo, //TODO:LastSuccessfulTriggerOnParent - Scanned: artifact.Scanned, - ScanEnabled: artifact.ScanEnabled, - RunningOnParentCd: artifact.Id == artifactRunningOnParentCd, - ExternalCiPipelineId: artifact.ExternalCiPipelineId, - ParentCiArtifact: artifact.ParentCiArtifact, + Scanned: artifact.Scanned, + ScanEnabled: artifact.ScanEnabled, + RunningOnParentCd: artifact.Id == artifactRunningOnParentCd, 
+ ExternalCiPipelineId: artifact.ExternalCiPipelineId, + ParentCiArtifact: artifact.ParentCiArtifact, + CreatedTime: formatDate(artifact.CreatedOn, bean2.LayoutRFC3339), + CiPipelineId: artifact.PipelineId, + CredentialsSourceType: artifact.CredentialsSourceType, + CredentialsSourceValue: artifact.CredentialSourceValue, } if artifact.WorkflowId != nil { ciArtifact.CiWorkflowId = *artifact.WorkflowId @@ -630,15 +785,15 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts ciArtifacts = append(ciArtifacts, ciArtifact) } - return ciArtifacts, nil + return ciArtifacts, totalCount, nil } -func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, error) { +func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, int, error) { - artifacts, err := impl.ciArtifactRepository.GetArtifactsByCDPipelineV3(listingFilterOpts) + artifacts, totalCount, err := impl.ciArtifactRepository.GetArtifactsByCDPipelineV3(listingFilterOpts) if err != nil { impl.logger.Errorw("error in getting artifacts for ci", "err", err) - return nil, err + return nil, totalCount, err } //TODO Gireesh: if initialized then no need of using append, put value directly to index @@ -650,16 +805,20 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpt impl.logger.Errorw("Error in parsing artifact material info", "err", err, "artifact", artifact) } ciArtifact := &bean2.CiArtifactBean{ - Id: artifact.Id, - Image: artifact.Image, - ImageDigest: artifact.ImageDigest, - MaterialInfo: mInfo, - ScanEnabled: artifact.ScanEnabled, - Scanned: artifact.Scanned, - Deployed: artifact.Deployed, - DeployedTime: formatDate(artifact.DeployedTime, bean2.LayoutRFC3339), - ExternalCiPipelineId: artifact.ExternalCiPipelineId, - ParentCiArtifact: artifact.ParentCiArtifact, + Id: artifact.Id, + Image: 
artifact.Image, + ImageDigest: artifact.ImageDigest, + MaterialInfo: mInfo, + ScanEnabled: artifact.ScanEnabled, + Scanned: artifact.Scanned, + Deployed: artifact.Deployed, + DeployedTime: formatDate(artifact.DeployedTime, bean2.LayoutRFC3339), + ExternalCiPipelineId: artifact.ExternalCiPipelineId, + ParentCiArtifact: artifact.ParentCiArtifact, + CreatedTime: formatDate(artifact.CreatedOn, bean2.LayoutRFC3339), + CiPipelineId: artifact.PipelineId, + CredentialsSourceType: artifact.CredentialsSourceType, + CiConfigureSourceValue: artifact.CredentialSourceValue, } if artifact.WorkflowId != nil { ciArtifact.CiWorkflowId = *artifact.WorkflowId @@ -667,5 +826,5 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpt ciArtifacts = append(ciArtifacts, ciArtifact) } - return ciArtifacts, nil + return ciArtifacts, totalCount, nil } diff --git a/pkg/pipeline/PipelineBuilder.go b/pkg/pipeline/PipelineBuilder.go index 0cd2fbbd5e..da87b37fc7 100644 --- a/pkg/pipeline/PipelineBuilder.go +++ b/pkg/pipeline/PipelineBuilder.go @@ -250,7 +250,7 @@ type ConfigMapSecretsResponse struct { } func parseMaterialInfo(materialInfo json.RawMessage, source string) (json.RawMessage, error) { - if source != "GOCD" && source != "CI-RUNNER" && source != "EXTERNAL" { + if source != "GOCD" && source != "CI-RUNNER" && source != "EXTERNAL" && source != "PRE_CD" && source != "POST_CD" && source != "POST_CI" && source != "PRE_CI" { return nil, fmt.Errorf("datasource: %s not supported", source) } var ciMaterials []repository.CiMaterialInfo diff --git a/wire_gen.go b/wire_gen.go index 2c9845e17e..b5016612da 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -491,7 +491,7 @@ func InitializeApp() (*App, error) { } devtronAppCMCSServiceImpl := pipeline.NewDevtronAppCMCSServiceImpl(sugaredLogger, appServiceImpl, attributesRepositoryImpl) cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, 
pipelineConfigRepositoryImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, gitOpsConfigRepositoryImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, chartDeploymentServiceImpl, chartTemplateServiceImpl, propertiesConfigServiceImpl, appLevelMetricsRepositoryImpl, deploymentTemplateHistoryServiceImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, pipelineDeploymentServiceTypeConfig, applicationServiceClientImpl, devtronAppCMCSServiceImpl, customTagServiceImpl) - appArtifactManagerImpl := pipeline.NewAppArtifactManagerImpl(sugaredLogger, cdWorkflowRepositoryImpl, userServiceImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl, ciWorkflowRepositoryImpl, pipelineStageServiceImpl, cdPipelineConfigServiceImpl) + appArtifactManagerImpl := pipeline.NewAppArtifactManagerImpl(sugaredLogger, cdWorkflowRepositoryImpl, userServiceImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl, ciWorkflowRepositoryImpl, pipelineStageServiceImpl, cdPipelineConfigServiceImpl, dockerArtifactStoreRepositoryImpl, ciPipelineRepositoryImpl) globalStrategyMetadataChartRefMappingRepositoryImpl := chartRepoRepository.NewGlobalStrategyMetadataChartRefMappingRepositoryImpl(db, sugaredLogger) devtronAppStrategyServiceImpl := pipeline.NewDevtronAppStrategyServiceImpl(sugaredLogger, chartRepositoryImpl, globalStrategyMetadataChartRefMappingRepositoryImpl, ciCdPipelineOrchestratorImpl, cdPipelineConfigServiceImpl) appDeploymentTypeChangeManagerImpl := pipeline.NewAppDeploymentTypeChangeManagerImpl(sugaredLogger, pipelineRepositoryImpl, workflowDagExecutorImpl, appServiceImpl, chartTemplateServiceImpl, appStatusRepositoryImpl, helmAppServiceImpl, applicationServiceClientImpl, appArtifactManagerImpl, 
cdPipelineConfigServiceImpl) From 964d135593796b343e2fb8ead13b73994087d776 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 30 Oct 2023 19:52:54 +0530 Subject: [PATCH 080/143] hiding credential source type and value from request --- pkg/bean/app.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/bean/app.go b/pkg/bean/app.go index 2a3f1b89e5..ef7411ec35 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -741,8 +741,8 @@ type CiArtifactBean struct { RegistryType string `json:"registryType"` RegistryName string `json:"registryName"` CiPipelineId int `json:"-"` - CredentialsSourceType string `json:"containerRegistryId"` - CredentialsSourceValue string `json:"credentialsSourceValue"` + CredentialsSourceType string `json:"-"` + CredentialsSourceValue string `json:"-"` } type CiArtifactResponse struct { From 8d2d3fcb400aa11c37dd7ea90319d250408c1697 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 30 Oct 2023 20:10:55 +0530 Subject: [PATCH 081/143] adding registry type and name flag --- .../sql/repository/CiArtifactRepository.go | 2 +- pkg/pipeline/AppArtifactManager.go | 40 +++++++++++++++---- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index cf0cdeb797..312e3f030b 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -133,7 +133,7 @@ func (impl CiArtifactRepositoryImpl) GetArtifactParentCiAndWorkflowDetailsByIds( } err := impl.dbConnection.Model(&artifacts). - Column("ci_artifact.ci_workflow_id", "ci_artifact.parent_ci_artifact", "ci_artifact.external_ci_pipeline_id", "ci_artifact.id"). + Column("ci_artifact.ci_workflow_id", "ci_artifact.parent_ci_artifact", "ci_artifact.external_ci_pipeline_id", "ci_artifact.id", "ci_artifact.pipeline_id"). Where("ci_artifact.id in (?)", pg.In(ids)). 
Select() diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 902f320a53..09a9266c05 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -146,6 +146,9 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStage(pipelineId int, sta Latest: latest, Scanned: wfr.CdWorkflow.CiArtifact.Scanned, ScanEnabled: wfr.CdWorkflow.CiArtifact.ScanEnabled, + CiPipelineId: wfr.CdWorkflow.CiArtifact.PipelineId, + CredentialsSourceType: wfr.CdWorkflow.CiArtifact.CredentialsSourceType, + CredentialsSourceValue: wfr.CdWorkflow.CiArtifact.CredentialSourceValue, } if !parent { ciArtifact.Deployed = true @@ -185,12 +188,15 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParent(cdPipelineId int, impl.logger.Errorw("Error in parsing artifact material info", "err", err, "artifact", artifact) } ciArtifacts = append(ciArtifacts, bean2.CiArtifactBean{ - Id: artifact.Id, - Image: artifact.Image, - ImageDigest: artifact.ImageDigest, - MaterialInfo: mInfo, - ScanEnabled: artifact.ScanEnabled, - Scanned: artifact.Scanned, + Id: artifact.Id, + Image: artifact.Image, + ImageDigest: artifact.ImageDigest, + MaterialInfo: mInfo, + ScanEnabled: artifact.ScanEnabled, + Scanned: artifact.Scanned, + CiPipelineId: artifact.PipelineId, + CredentialsSourceType: artifact.CredentialsSourceType, + CredentialsSourceValue: artifact.CredentialSourceValue, }) } } @@ -480,7 +486,27 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipeline(pipeline *pipe // if external webhook continue continue } - + var dockerRegistryId string + if artifact.PipelineId != 0 { + ciPipeline, err := impl.CiPipelineRepository.FindById(artifact.PipelineId) + if err != nil { + impl.logger.Errorw("error in fetching ciPipeline", "ciPipelineId", ciPipeline.Id, "error", err) + return nil, err + } + dockerRegistryId = *ciPipeline.CiTemplate.DockerRegistryId + } else { + if artifact.CredentialsSourceType == repository.GLOBAL_CONTAINER_REGISTRY { 
+ dockerRegistryId = artifact.CredentialSourceValue + } + } + if len(dockerRegistryId) > 0 { + dockerArtifact, err := impl.dockerArtifactRegistry.FindOne(dockerRegistryId) + if err != nil { + impl.logger.Errorw("error in getting docker registry details", "err", err, "dockerArtifactStoreId", dockerRegistryId) + } + ciArtifacts[i].RegistryType = string(dockerArtifact.RegistryType) + ciArtifacts[i].RegistryName = dockerRegistryId + } var ciWorkflow *pipelineConfig.CiWorkflow if artifact.ParentCiArtifact != 0 { ciWorkflow, err = impl.ciWorkflowRepository.FindLastTriggeredWorkflowGitTriggersByArtifactId(artifact.ParentCiArtifact) From 8faf24a47d2c1d209af5eab81ac04feb38ba311a Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Tue, 31 Oct 2023 13:35:54 +0530 Subject: [PATCH 082/143] fixes: trigger pre cd --- pkg/pipeline/WorkflowDagExecutor.go | 2 +- pkg/pipeline/pipelineStageVariableParser.go | 11 +++++++---- scripts/sql/182_skopeo_plugin.up.sql | 4 ++-- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 12f29eecd8..e5ca13109b 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -754,7 +754,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * cdStageWorkflowRequest.StageType = PRE // handling plugin specific logic skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) - for _, step := range cdStageWorkflowRequest.PreCiSteps { + for _, step := range cdStageWorkflowRequest.PrePostDeploySteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, bean3.EntityTypePreCD, strconv.Itoa(pipeline.Id), 
cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index 0bff56e6da..da818298c5 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -97,18 +97,21 @@ func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImage AWSAccessKeyId: registryCredentials.AWSAccessKeyId, } - registryRepoDetails := strings.Split(destinationInfo, "\n") - for _, detail := range registryRepoDetails { + destinationRegistryRepoDetails := strings.Split(destinationInfo, "\n") + for _, detail := range destinationRegistryRepoDetails { registryRepoSplit := strings.Split(detail, "|") registryName := strings.Trim(registryRepoSplit[0], " ") registryCredentials, err := impl.dockerRegistryConfig.FetchOneDockerAccount(registryName) if err != nil { impl.logger.Errorw("error in fetching registry details by registry name", "err", err) + if err == pg.ErrNoRows { + return registryDestinationImageMap, registryCredentialsMap, fmt.Errorf("invalid registry name: registry details not found in global container registries") + } return registryDestinationImageMap, registryCredentialsMap, err } var destinationImages []string - repositoryValues := registryRepoSplit[1] - repositoryValuesSplit := strings.Split(repositoryValues, ",") + destinationRepositoryValues := registryRepoSplit[1] + repositoryValuesSplit := strings.Split(destinationRepositoryValues, ",") for _, repositoryName := range repositoryValuesSplit { repositoryName = strings.Trim(repositoryName, " ") diff --git a/scripts/sql/182_skopeo_plugin.up.sql b/scripts/sql/182_skopeo_plugin.up.sql index 72a5be3a9b..c3f3b13915 100644 --- a/scripts/sql/182_skopeo_plugin.up.sql +++ b/scripts/sql/182_skopeo_plugin.up.sql @@ -11,7 +11,7 @@ INSERT INTO "plugin_stage_mapping" ("plugin_id","stage_type","created_on", "crea VALUES ((SELECT id FROM plugin_metadata WHERE 
name='Skopeo'),0,'now()', 1, 'now()', 1); INSERT INTO "plugin_pipeline_script" ("id","type","mount_directory_from_host","container_image_path","deleted","created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_pipeline_script'),'CONTAINER_IMAGE','t','','f','now()',1,'now()',1); +VALUES (nextval('id_seq_plugin_pipeline_script'),'CONTAINER_IMAGE','t','quay.io/devtron/test:ec27cbd0-81-446','f','now()',1,'now()',1); INSERT INTO "plugin_step" ("id", "plugin_id","name","description","index","step_type","script_id","deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM plugin_metadata WHERE name='Skopeo'),'Step 1','Step 1 - Copy container images','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1); @@ -26,7 +26,7 @@ INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "d VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'DOCKER_IMAGE','STRING','',false,true,'INPUT','GLOBAL',1 ,'DOCKER_IMAGE','f','now()', 1, 'now()', 1); INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'REGISTRY_DESTINATION_IMAGE_MAP','STRING','',false,true,'INPUT','GLOBAL',1 ,'REGISTRY_DESTINATION_IMAGE_MAP','f','now()', 1, 'now()', 1); +VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' 
and ps."index"=1 and ps.deleted=false), 'REGISTRY_DESTINATION_IMAGE_MAP','STRING','map of registry name and images needed to be copied in that images',false,true,'INPUT','GLOBAL',1 ,'REGISTRY_DESTINATION_IMAGE_MAP','f','now()', 1, 'now()', 1); INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'REGISTRY_CREDENTIALS','STRING','',false,true,'INPUT','GLOBAL',1 ,'REGISTRY_CREDENTIALS','f','now()', 1, 'now()', 1); From b9948a1b0f1c23b4d103c987b81cd230b2abd2d9 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Tue, 31 Oct 2023 14:07:03 +0530 Subject: [PATCH 083/143] deleting cd custom tag on pipeline delete --- .../DeploymentPipelineConfigService.go | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index 78e59330fc..884dce3d64 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -722,6 +722,26 @@ func (impl *CdPipelineConfigServiceImpl) DeleteCdPipeline(pipeline *pipelineConf return deleteResponse, err } } + if cdPipelinePluginDeleteReq.PreDeployStage != nil { + tag := bean2.CustomTag{ + EntityKey: bean3.EntityTypePreCD, + EntityValue: strconv.Itoa(pipeline.Id), + } + err = impl.customTagService.DeleteCustomTagIfExists(tag) + if err != nil { + impl.logger.Errorw("error in deleting custom tag for pre-cd stage", "Err", err, "cd-pipeline-id", pipeline.Id) + } + } + if cdPipelinePluginDeleteReq.PostDeployStage != nil { + tag := bean2.CustomTag{ + EntityKey: bean3.EntityTypePostCD, + 
EntityValue: strconv.Itoa(pipeline.Id), + } + err = impl.customTagService.DeleteCustomTagIfExists(tag) + if err != nil { + impl.logger.Errorw("error in deleting custom tag for pre-cd stage", "Err", err, "cd-pipeline-id", pipeline.Id) + } + } //delete app from argo cd, if created if pipeline.DeploymentAppCreated == true { deploymentAppName := fmt.Sprintf("%s-%s", pipeline.App.AppName, pipeline.Environment.Name) From 15cd7bc6169eeb08aa6099b25c2efe7e0d9608da Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Tue, 31 Oct 2023 15:27:13 +0530 Subject: [PATCH 084/143] setting data source value --- pkg/pipeline/AppArtifactManager.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 5282775f8b..23adc0932d 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -674,6 +674,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A DeployedTime: formatDate(latestWf[0].CdWorkflow.CreatedOn, bean2.LayoutRFC3339), Latest: true, CreatedTime: formatDate(currentRunningArtifact.CreatedOn, bean2.LayoutRFC3339), + DataSource: currentRunningArtifact.DataSource, } if currentRunningArtifact.WorkflowId != nil { currentRunningArtifactBean.CiWorkflowId = *currentRunningArtifact.WorkflowId @@ -745,6 +746,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts ExternalCiPipelineId: wfr.CdWorkflow.CiArtifact.ExternalCiPipelineId, ParentCiArtifact: wfr.CdWorkflow.CiArtifact.ParentCiArtifact, CreatedTime: formatDate(wfr.CdWorkflow.CiArtifact.CreatedOn, bean2.LayoutRFC3339), + DataSource: wfr.CdWorkflow.CiArtifact.DataSource, } if wfr.CdWorkflow.CiArtifact.WorkflowId != nil { ciArtifact.CiWorkflowId = *wfr.CdWorkflow.CiArtifact.WorkflowId @@ -783,6 +785,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCIParentV2(listingFilterOpt ExternalCiPipelineId: artifact.ExternalCiPipelineId, ParentCiArtifact: 
artifact.ParentCiArtifact, CreatedTime: formatDate(artifact.CreatedOn, bean2.LayoutRFC3339), + DataSource: artifact.DataSource, } if artifact.WorkflowId != nil { ciArtifact.CiWorkflowId = *artifact.WorkflowId From d282ca2f84faa595f506b2d3d7e54fb523e2befb Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Tue, 31 Oct 2023 15:28:24 +0530 Subject: [PATCH 085/143] sql script --- scripts/sql/184_resource_filter_audit_update.down.sql | 1 + scripts/sql/184_resource_filter_audit_update.up.sql | 1 + 2 files changed, 2 insertions(+) create mode 100644 scripts/sql/184_resource_filter_audit_update.down.sql create mode 100644 scripts/sql/184_resource_filter_audit_update.up.sql diff --git a/scripts/sql/184_resource_filter_audit_update.down.sql b/scripts/sql/184_resource_filter_audit_update.down.sql new file mode 100644 index 0000000000..0440043059 --- /dev/null +++ b/scripts/sql/184_resource_filter_audit_update.down.sql @@ -0,0 +1 @@ +ALTER TABLE resource_filter_audit DROP COLUMN filter_name; \ No newline at end of file diff --git a/scripts/sql/184_resource_filter_audit_update.up.sql b/scripts/sql/184_resource_filter_audit_update.up.sql new file mode 100644 index 0000000000..4b8b7ae7aa --- /dev/null +++ b/scripts/sql/184_resource_filter_audit_update.up.sql @@ -0,0 +1 @@ +ALTER TABLE resource_filter_audit ADD COLUMN filter_name varchar(300); \ No newline at end of file From 37071c767592cc7ec5bd3af48730401257b1f475 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Tue, 31 Oct 2023 16:22:20 +0530 Subject: [PATCH 086/143] artifacts save code fix --- pkg/pipeline/WorkflowDagExecutor.go | 40 ++++++++++++----------------- 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index e5ca13109b..b833f96b85 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -214,16 +214,16 @@ type CiArtifactDTO struct { } type CdStageCompleteEvent struct { - CiProjectDetails 
[]bean3.CiProjectDetails `json:"ciProjectDetails"` - WorkflowId int `json:"workflowId"` - WorkflowRunnerId int `json:"workflowRunnerId"` - CdPipelineId int `json:"cdPipelineId"` - TriggeredBy int32 `json:"triggeredBy"` - StageYaml string `json:"stageYaml"` - ArtifactLocation string `json:"artifactLocation"` - PipelineName string `json:"pipelineName"` - CiArtifactDTO pipelineConfig.CiArtifactDTO `json:"ciArtifactDTO"` - PluginRegistryImageDetails map[string][]string + CiProjectDetails []bean3.CiProjectDetails `json:"ciProjectDetails"` + WorkflowId int `json:"workflowId"` + WorkflowRunnerId int `json:"workflowRunnerId"` + CdPipelineId int `json:"cdPipelineId"` + TriggeredBy int32 `json:"triggeredBy"` + StageYaml string `json:"stageYaml"` + ArtifactLocation string `json:"artifactLocation"` + PipelineName string `json:"pipelineName"` + CiArtifactDTO pipelineConfig.CiArtifactDTO `json:"ciArtifactDTO"` + PluginRegistryArtifactDetails map[string][]string `json:"PluginRegistryArtifactDetails"` } type GitMetadata struct { @@ -424,7 +424,7 @@ func (impl *WorkflowDagExecutorImpl) Subscribe() error { } } else if wf.WorkflowType == bean.CD_WORKFLOW_TYPE_POST { impl.logger.Debugw("received post stage success event for workflow runner ", "wfId", strconv.Itoa(wf.Id)) - err = impl.HandlePostStageSuccessEvent(wf.CdWorkflowId, cdStageCompleteEvent.CdPipelineId, cdStageCompleteEvent.TriggeredBy, cdStageCompleteEvent.PluginRegistryImageDetails) + err = impl.HandlePostStageSuccessEvent(wf.CdWorkflowId, cdStageCompleteEvent.CdPipelineId, cdStageCompleteEvent.TriggeredBy, cdStageCompleteEvent.PluginRegistryArtifactDetails) if err != nil { impl.logger.Errorw("deployment success event error", "err", err) return @@ -599,14 +599,13 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(cdStageCompleteE if err != nil { return err } - var PreCDArtifacts []*repository.CiArtifact + PreCDArtifacts, err := impl.SavePluginArtifacts(ciArtifact, 
cdStageCompleteEvent.PluginRegistryArtifactDetails, pipeline.Id, repository.PRE_CD) + if err != nil { + impl.logger.Errorw("error in saving plugin artifacts", "err", err) + return err + } if pipeline.TriggerType == pipelineConfig.TRIGGER_TYPE_AUTOMATIC { - if len(cdStageCompleteEvent.PluginRegistryImageDetails) > 0 { - PreCDArtifacts, err = impl.SavePluginArtifacts(ciArtifact, cdStageCompleteEvent.PluginRegistryImageDetails, pipeline.Id, repository.PRE_CD) - if err != nil { - impl.logger.Errorw("error in saving plugin artifacts", "err", err) - return err - } + if len(cdStageCompleteEvent.PluginRegistryArtifactDetails) > 0 { if len(PreCDArtifacts) > 0 { ciArtifact = PreCDArtifacts[0] // deployment will be trigger with artifact copied by plugin } @@ -651,11 +650,6 @@ func (impl *WorkflowDagExecutorImpl) SavePluginArtifacts(ciArtifact *repository. CDArtifacts = append(CDArtifacts, pluginArtifact) } } - err := impl.ciArtifactRepository.SaveAll(CDArtifacts) - if err != nil { - impl.logger.Errorw("Error in saving artifacts metadata generated by plugin") - return CDArtifacts, err - } return CDArtifacts, nil } From bf4f173136eac4eab9ca4b700d6fa5bf717e6b83 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Tue, 31 Oct 2023 16:28:08 +0530 Subject: [PATCH 087/143] removing unnecessary information --- pkg/pipeline/WorkflowDagExecutor.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index b833f96b85..7691cf33e1 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -605,10 +605,8 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(cdStageCompleteE return err } if pipeline.TriggerType == pipelineConfig.TRIGGER_TYPE_AUTOMATIC { - if len(cdStageCompleteEvent.PluginRegistryArtifactDetails) > 0 { - if len(PreCDArtifacts) > 0 { - ciArtifact = PreCDArtifacts[0] // deployment will be trigger with artifact copied by plugin - } + if 
len(PreCDArtifacts) > 0 { + ciArtifact = PreCDArtifacts[0] // deployment will be trigger with artifact copied by plugin } cdWorkflow, err := impl.cdWorkflowRepository.FindById(cdStageCompleteEvent.WorkflowId) if err != nil { From dea49baa7cc1251c027772313069478af0059294 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 1 Nov 2023 11:40:46 +0530 Subject: [PATCH 088/143] fix image api --- .../pipelineConfig/CdWorfkflowRepository.go | 94 ++++++++++--------- pkg/pipeline/AppArtifactManager.go | 6 +- pkg/pipeline/WorkflowDagExecutor.go | 5 + 3 files changed, 61 insertions(+), 44 deletions(-) diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 0e82607529..0ec9cf0889 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -29,7 +29,6 @@ import ( "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" - "github.com/go-pg/pg/orm" "go.opentelemetry.io/otel" "go.uber.org/zap" "time" @@ -383,49 +382,60 @@ func (impl *CdWorkflowRepositoryImpl) FindCdWorkflowMetaByPipelineId(pipelineId func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]repository.CiArtifact, int, error) { var ciArtifacts []repository.CiArtifact - //TODO Gireesh: why are we extracting artifacts which belongs to current pipeline as it will impact page size of response ?? - query := impl.dbConnection.Model(&ciArtifacts). - Column("ci_artifact.*"). - Join("LEFT JOIN cd_workflow ON ci_artifact.id = cd_workflow.ci_artifact_id"). - Join("INNER JOIN cd_workflow_runner ON cd_workflow_runner.cd_workflow_id=cd_workflow.id"). 
- WhereGroup(func(q *orm.Query) (*orm.Query, error) { - q = q.WhereGroup(func(sq *orm.Query) (*orm.Query, error) { - sq.Where("cd_workflow_runner.id IN (select MAX(cd_workflow_runner.id) OVER (PARTITION BY cd_workflow.ci_artifact_id) FROM cd_workflow_runner inner join cd_workflow on cd_workflow.id=cd_workflow_runner.cd_workflow_id) ") - sq.WhereGroup(func(ssq *orm.Query) (*orm.Query, error) { - ssq.WhereOr(" cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ? ", - listingFilterOptions.PipelineId, listingFilterOptions.StageType). - WhereOr("cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ? AND cd_workflow_runner.status IN (?)", - listingFilterOptions.ParentId, - listingFilterOptions.ParentStageType, - pg.In([]string{application.Healthy, application.SUCCEEDED})) - return ssq, nil - }) - return sq, nil - }) - q = q.WhereOr("ci_artifact.data_source=? AND ci_artifact.component_id=?", - listingFilterOptions.ParentStageType, - listingFilterOptions.ParentId) - return q, nil - }). - Where("ci_artifact.image LIKE ?", listingFilterOptions.SearchString) - if len(listingFilterOptions.ExcludeArtifactIds) > 0 { - query = query.Where("cd_workflow.ci_artifact_id NOT IN (?)", pg.In(listingFilterOptions.ExcludeArtifactIds)) - } - - totalCount, err := query.Count() - if err == pg.ErrNoRows { - return ciArtifacts, totalCount, err - } - query = query. - Limit(listingFilterOptions.Limit). 
- Offset(listingFilterOptions.Offset) - - err = query.Select() - if err == pg.ErrNoRows { - return ciArtifacts, totalCount, nil + query := "SELECT ci_artifact.* FROM ci_artifact" + + " LEFT JOIN cd_workflow ON ci_artifact.id = cd_workflow.ci_artifact_id" + + " LEFT JOIN cd_workflow_runner ON cd_workflow_runner.cd_workflow_id=cd_workflow.id " + + " Where (((cd_workflow_runner.id in (select MAX(cd_workflow_runner.id) OVER (PARTITION BY cd_workflow.ci_artifact_id) FROM cd_workflow_runner inner join cd_workflow on cd_workflow.id=cd_workflow_runner.cd_workflow_id))" + + " AND ((cd_workflow.pipeline_id= ? and cd_workflow_runner.workflow_type = ? ) OR (cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ? AND cd_workflow_runner.status IN ( ? ) )))" + + " OR (ci_artifact.component_id = ? and ci_artifact.data_source= ? ))" + + " AND (ci_artifact.image LIKE ? )" + if len(listingFilterOptions.ExcludeArtifactIds) > 0 { + query = query + " AND (cd_workflow.ci_artifact_id NOT IN (?) )" + query = query + fmt.Sprintf(" LIMIT %d", listingFilterOptions.Limit) + query = query + fmt.Sprintf(" OFFSET %d", listingFilterOptions.Offset) + _, err := impl.dbConnection.Query(&ciArtifacts, query, + listingFilterOptions.PipelineId, + listingFilterOptions.StageType, + listingFilterOptions.ParentId, + listingFilterOptions.ParentStageType, + pg.In([]string{application.Healthy, application.SUCCEEDED}), + listingFilterOptions.ParentId, + listingFilterOptions.ParentStageType, + listingFilterOptions.SearchString, + pg.In(listingFilterOptions.ExcludeArtifactIds), + ) + if err != nil { + return ciArtifacts, 0, err + } + } else { + query = query + fmt.Sprintf(" LIMIT %d", listingFilterOptions.Limit) + query = query + fmt.Sprintf(" OFFSET %d", listingFilterOptions.Offset) + _, err := impl.dbConnection.Query(&ciArtifacts, query, + listingFilterOptions.PipelineId, + listingFilterOptions.StageType, + listingFilterOptions.ParentId, + listingFilterOptions.ParentStageType, + 
pg.In([]string{application.Healthy, application.SUCCEEDED}), + listingFilterOptions.ParentId, + "pre_cd", + listingFilterOptions.SearchString, + ) + if err != nil { + return ciArtifacts, 0, err + } } - return ciArtifacts, totalCount, err + // + //totalCount, err := query.Count() + //if err == pg.ErrNoRows { + // return ciArtifacts, totalCount, err + //} + + //err = query.Select() + //if err == pg.ErrNoRows { + // return ciArtifacts, totalCount, nil + //} + return ciArtifacts, 0, nil } func (impl *CdWorkflowRepositoryImpl) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) { diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 09a9266c05..8e4b3676dd 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -666,14 +666,16 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi impl.logger.Errorw("error in getting ci_workflow for artifacts", "err", err, "artifact", artifact, "parentStage", parentType, "stage", stage) return ciArtifactsResponse, err } - } else { + } else if artifact.CiWorkflowId != 0 { ciWorkflow, err = impl.ciWorkflowRepository.FindCiWorkflowGitTriggersById(artifact.CiWorkflowId) if err != nil { impl.logger.Errorw("error in getting ci_workflow for artifacts", "err", err, "artifact", artifact, "parentStage", parentType, "stage", stage) return ciArtifactsResponse, err } } - ciArtifacts[i].CiConfigureSourceType = ciWorkflow.GitTriggers[ciWorkflow.CiPipelineId].CiConfigureSourceType + if ciWorkflow != nil { + ciArtifacts[i].CiConfigureSourceType = ciWorkflow.GitTriggers[ciWorkflow.CiPipelineId].CiConfigureSourceType + } } ciArtifactsResponse.CdPipelineId = pipeline.Id diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 7691cf33e1..bd850dcd5c 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -646,6 
+646,11 @@ func (impl *WorkflowDagExecutorImpl) SavePluginArtifacts(ciArtifact *repository. }, } CDArtifacts = append(CDArtifacts, pluginArtifact) + err := impl.ciArtifactRepository.SaveAll(CDArtifacts) + if err != nil { + impl.logger.Errorw("Error in saving artifacts metadata generated by plugin") + return CDArtifacts, err + } } } return CDArtifacts, nil From 9f7aad3e66af79d556b5e3caeba1db6ba24032c4 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 1 Nov 2023 12:35:45 +0530 Subject: [PATCH 089/143] adding plugin parent --- api/bean/ValuesOverrideRequest.go | 3 +++ .../sql/repository/pipelineConfig/CdWorfkflowRepository.go | 5 ++--- pkg/pipeline/AppArtifactManager.go | 5 +++++ 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/api/bean/ValuesOverrideRequest.go b/api/bean/ValuesOverrideRequest.go index f5c2146f77..aeefabb91a 100644 --- a/api/bean/ValuesOverrideRequest.go +++ b/api/bean/ValuesOverrideRequest.go @@ -113,4 +113,7 @@ type ArtifactsListFilterOptions struct { //excludeArtifactIds ExcludeArtifactIds []int + + //pluginStage + PluginStage string } diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 0ec9cf0889..8df3776f91 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -382,7 +382,6 @@ func (impl *CdWorkflowRepositoryImpl) FindCdWorkflowMetaByPipelineId(pipelineId func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]repository.CiArtifact, int, error) { var ciArtifacts []repository.CiArtifact - query := "SELECT ci_artifact.* FROM ci_artifact" + " LEFT JOIN cd_workflow ON ci_artifact.id = cd_workflow.ci_artifact_id" + " LEFT JOIN cd_workflow_runner ON cd_workflow_runner.cd_workflow_id=cd_workflow.id " + @@ -401,7 +400,7 @@ func (impl *CdWorkflowRepositoryImpl) 
FindArtifactByListFilter(listingFilterOpti listingFilterOptions.ParentStageType, pg.In([]string{application.Healthy, application.SUCCEEDED}), listingFilterOptions.ParentId, - listingFilterOptions.ParentStageType, + listingFilterOptions.PluginStage, listingFilterOptions.SearchString, pg.In(listingFilterOptions.ExcludeArtifactIds), ) @@ -418,7 +417,7 @@ func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOpti listingFilterOptions.ParentStageType, pg.In([]string{application.Healthy, application.SUCCEEDED}), listingFilterOptions.ParentId, - "pre_cd", + listingFilterOptions.PluginStage, listingFilterOptions.SearchString, ) if err != nil { diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 8e4b3676dd..38971c9a54 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -745,6 +745,11 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A return ciArtifacts, 0, "", totalCount, err } } else { + if listingFilterOpts.ParentStageType == PRE { + listingFilterOpts.PluginStage = repository.PRE_CD + } else if listingFilterOpts.ParentStageType == POST { + listingFilterOpts.PluginStage = repository.POST_CD + } ciArtifacts, totalCount, err = impl.BuildArtifactsForCdStageV2(listingFilterOpts) if err != nil { impl.logger.Errorw("error in getting ci artifacts for ci/webhook type parent", "pipelineId", listingFilterOpts.PipelineId, "parentPipelineId", listingFilterOpts.ParentId, "parentStageType", listingFilterOpts.ParentStageType, "currentStageType", listingFilterOpts.StageType) From 686ba66d3a626e4f9fb2fdd7a156c320a0849a3b Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Wed, 1 Nov 2023 17:37:25 +0530 Subject: [PATCH 090/143] queries optimised and created queryBuilder for getting artifacts list --- .../sql/repository/CiArtifactRepository.go | 103 ++++++++---------- .../CiArtifactsListingQueryBuilder.go | 83 ++++++++++++++ 
.../pipelineConfig/CdWorfkflowRepository.go | 79 +++----------- pkg/pipeline/AppArtifactManager.go | 22 ++-- 4 files changed, 153 insertions(+), 134 deletions(-) create mode 100644 internal/sql/repository/CiArtifactsListingQueryBuilder.go diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 9dd9b13ee5..5fe8df0110 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -31,6 +31,15 @@ import ( "go.uber.org/zap" ) +type CiArtifactWithExtraData struct { + CiArtifact + PayloadSchema string + TotalCount int + TriggeredBy int32 + StartedOn time.Time + CdWorkflowRunnerId int +} + type CiArtifact struct { tableName struct{} `sql:"ci_artifact" pg:",discard_unknown_columns"` Id int `sql:"id,pk"` @@ -72,6 +81,7 @@ type CiArtifactRepository interface { GetByIds(ids []int) ([]*CiArtifact, error) GetArtifactByCdWorkflowId(cdWorkflowId int) (artifact *CiArtifact, err error) GetArtifactsByParentCiWorkflowId(parentCiWorkflowId int) ([]string, error) + FetchArtifactsByCdPipelineIdV2(listingFilterOptions bean.ArtifactsListFilterOptions) ([]CiArtifactWithExtraData, int, error) } type CiArtifactRepositoryImpl struct { @@ -241,63 +251,33 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipeline(cdPipelineId, limi } func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, int, error) { - artifacts := make([]*CiArtifact, 0, listingFilterOpts.Limit) - totalCount := 0 - commonPaginatedQueryPart := " cia.image LIKE ?" - orderByClause := " ORDER BY cia.id DESC" - limitOffsetQueryPart := - " LIMIT ? OFFSET ?;" - if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE { - selectQuery := " SELECT cia.* " - remainingQuery := " FROM ci_artifact cia" + - " INNER JOIN ci_pipeline cp ON cp.id=cia.pipeline_id" + - " INNER JOIN pipeline p ON p.ci_pipeline_id = cp.id and p.id=?" 
+ - " WHERE " - if len(listingFilterOpts.ExcludeArtifactIds) > 0 { - remainingQuery += fmt.Sprintf(" cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) - } - countQuery := " SELECT count(cia.id) " - totalCountQuery := countQuery + remainingQuery + commonPaginatedQueryPart - _, err := impl.dbConnection.Query(&totalCount, totalCountQuery, listingFilterOpts.PipelineId, listingFilterOpts.SearchString) - if err != nil { - return artifacts, totalCount, err - } - - finalQuery := selectQuery + remainingQuery + commonPaginatedQueryPart + orderByClause + limitOffsetQueryPart - _, err = impl.dbConnection.Query(&artifacts, finalQuery, listingFilterOpts.PipelineId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) - if err != nil { - return artifacts, totalCount, err - } - - } else if listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { - selectQuery := " SELECT cia.* " - remainingQuery := " FROM ci_artifact cia " + - " WHERE cia.external_ci_pipeline_id = ? 
AND " - if len(listingFilterOpts.ExcludeArtifactIds) > 0 { - remainingQuery += fmt.Sprintf(" cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) - } - - countQuery := " SELECT count(cia.id) " - totalCountQuery := countQuery + remainingQuery + commonPaginatedQueryPart - _, err := impl.dbConnection.Query(&totalCount, totalCountQuery, listingFilterOpts.PipelineId, listingFilterOpts.SearchString) - if err != nil { - return artifacts, totalCount, err - } - - finalQuery := selectQuery + remainingQuery + commonPaginatedQueryPart + orderByClause + limitOffsetQueryPart + if listingFilterOpts.ParentStageType != bean.CI_WORKFLOW_TYPE && listingFilterOpts.ParentStageType != bean.WEBHOOK_WORKFLOW_TYPE { + return nil, 0, nil + } - _, err = impl.dbConnection.Query(&artifacts, finalQuery, listingFilterOpts.ParentId, listingFilterOpts.SearchString, listingFilterOpts.Limit, listingFilterOpts.Offset) - if err != nil { - return artifacts, totalCount, err - } - } else { - return artifacts, totalCount, nil + artifactsResp := make([]*CiArtifactWithExtraData, 0, listingFilterOpts.Limit) + var artifacts []*CiArtifact + totalCount := 0 + finalQuery := BuildQueryForParentTypeCIOrWebhook(*listingFilterOpts) + _, err := impl.dbConnection.Query(&artifactsResp, finalQuery) + if err != nil { + return nil, totalCount, err + } + artifacts = make([]*CiArtifact, len(artifactsResp)) + for i, _ := range artifactsResp { + artifacts[i] = &artifactsResp[i].CiArtifact + totalCount = artifactsResp[i].TotalCount } if len(artifacts) == 0 { return artifacts, totalCount, nil } + artifacts, err = impl.setDeployedDataInArtifacts(artifacts) + return artifacts, totalCount, err +} + +func (impl CiArtifactRepositoryImpl) setDeployedDataInArtifacts(artifacts []*CiArtifact) ([]*CiArtifact, error) { //processing artifactsMap := make(map[int]*CiArtifact) artifactsIds := make([]int, 0, len(artifacts)) @@ -308,7 +288,6 @@ func (impl CiArtifactRepositoryImpl) 
GetArtifactsByCDPipelineV3(listingFilterOpt //(this will fetch all the artifacts that were deployed on the given pipeline atleast once in new->old deployed order) artifactsDeployed := make([]*CiArtifact, 0, len(artifactsIds)) - //TODO Gireesh: compare this query plan with cd_workflow & cd_workflow_runner join query Plan, since pco is heavy query := " SELECT cia.id,pco.created_on AS created_on " + " FROM ci_artifact cia" + " INNER JOIN pipeline_config_override pco ON pco.ci_artifact_id=cia.id" + @@ -318,7 +297,7 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt _, err := impl.dbConnection.Query(&artifactsDeployed, query, pg.In(artifactsIds)) if err != nil { - return artifacts, totalCount, nil + return artifacts, nil } //set deployed time and latest deployed artifact @@ -330,10 +309,7 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpt } } - //TODO Gireesh: create separate meaningful functions of these queries - - return artifacts, totalCount, nil - + return artifacts, nil } func (impl CiArtifactRepositoryImpl) GetLatestArtifactTimeByCiPipelineIds(ciPipelineIds []int) ([]*CiArtifact, error) { @@ -678,3 +654,18 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByParentCiWorkflowId(parentCiWo } return artifacts, err } + +func (impl CiArtifactRepositoryImpl) FetchArtifactsByCdPipelineIdV2(listingFilterOptions bean.ArtifactsListFilterOptions) ([]CiArtifactWithExtraData, int, error) { + var wfrList []CiArtifactWithExtraData + totalCount := 0 + finalQuery := BuildQueryForArtifactsForRollback(listingFilterOptions) + _, err := impl.dbConnection.Query(&wfrList, finalQuery) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting Wfrs and ci artifacts by pipelineId", "err", err, "pipelineId", listingFilterOptions.PipelineId) + return nil, totalCount, err + } + if len(wfrList) > 0 { + totalCount = wfrList[0].TotalCount + } + return wfrList, totalCount, nil +} diff --git 
a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go new file mode 100644 index 0000000000..1f7d000492 --- /dev/null +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -0,0 +1,83 @@ +package repository + +import ( + "fmt" + "github.com/devtron-labs/devtron/api/bean" + "github.com/devtron-labs/devtron/internal/sql/repository/helper" + "github.com/go-pg/pg" +) + +func BuildQueryForParentTypeCIOrWebhook(listingFilterOpts bean.ArtifactsListFilterOptions) string { + commonPaginatedQueryPart := fmt.Sprintf(" cia.image LIKE '%v'", listingFilterOpts.SearchString) + orderByClause := " ORDER BY cia.id DESC" + limitOffsetQueryPart := fmt.Sprintf(" LIMIT %v OFFSET %v", listingFilterOpts.Limit, listingFilterOpts.Offset) + finalQuery := "" + if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE { + selectQuery := " SELECT cia.* " + remainingQuery := " FROM ci_artifact cia" + + " INNER JOIN ci_pipeline cp ON cp.id=cia.pipeline_id" + + " INNER JOIN pipeline p ON p.ci_pipeline_id = cp.id and p.id=%v" + + " WHERE " + remainingQuery = fmt.Sprintf(remainingQuery, listingFilterOpts.PipelineId) + if len(listingFilterOpts.ExcludeArtifactIds) > 0 { + remainingQuery += fmt.Sprintf("cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) + } + + countQuery := " SELECT count(cia.id) as total_count" + totalCountQuery := countQuery + remainingQuery + commonPaginatedQueryPart + selectQuery = fmt.Sprintf("%s,(%s) ", selectQuery, totalCountQuery) + finalQuery = selectQuery + remainingQuery + commonPaginatedQueryPart + orderByClause + limitOffsetQueryPart + } else if listingFilterOpts.ParentStageType == bean.WEBHOOK_WORKFLOW_TYPE { + selectQuery := " SELECT cia.* " + remainingQuery := " FROM ci_artifact cia " + + " WHERE cia.external_ci_pipeline_id = %v AND " + remainingQuery = fmt.Sprintf(remainingQuery, 
listingFilterOpts.ParentId) + if len(listingFilterOpts.ExcludeArtifactIds) > 0 { + remainingQuery += fmt.Sprintf("cia.id NOT IN (%s) AND ", helper.GetCommaSepratedString(listingFilterOpts.ExcludeArtifactIds)) + } + + countQuery := " SELECT count(cia.id) as total_count" + totalCountQuery := countQuery + remainingQuery + commonPaginatedQueryPart + selectQuery = fmt.Sprintf("%s,(%s) ", selectQuery, totalCountQuery) + finalQuery = selectQuery + remainingQuery + commonPaginatedQueryPart + orderByClause + limitOffsetQueryPart + + } + return finalQuery +} + +func BuildQueryForArtifactsForCdStage(listingFilterOptions bean.ArtifactsListFilterOptions) string { + commonQuery := " FROM cd_workflow_runner " + + " INNER JOIN cd_workflow ON cd_workflow.id=cd_workflow_runner.cd_workflow_id " + + " INNER JOIN ci_artifact cia ON cia.id = cd_workflow.ci_artifact_id " + + " WHERE (cd_workflow.pipeline_id = %v AND cd_workflow_runner.workflow_type = '%v') " + + " OR (cd_workflow.pipeline_id = %v AND cd_workflow_runner.workflow_type = '%v' AND cd_workflow_runner.status IN ('Healthy','Succeeded'))" + + " AND cia.image LIKE '%v' " + commonQuery = fmt.Sprintf(commonQuery, listingFilterOptions.PipelineId, listingFilterOptions.StageType, listingFilterOptions.ParentId, listingFilterOptions.ParentStageType, listingFilterOptions.SearchString) + if len(listingFilterOptions.ExcludeArtifactIds) > 0 { + commonQuery = commonQuery + fmt.Sprintf(" AND cd_workflow.ci_artifact_id NOT IN (%v)", helper.GetCommaSepratedString(listingFilterOptions.ExcludeArtifactIds)) + } + + totalCountQuery := "SELECT COUNT(DISTINCT ci_artifact_id) as total_count " + commonQuery + selectQuery := fmt.Sprintf("SELECT MAX(cd_workflow_runner.id) AS id, (%v) ", totalCountQuery) + GroupByQuery := " GROUP BY cd_workflow.ci_artifact_id " + limitOffSetQuery := fmt.Sprintf(" LIMIT %v OFFSET %v", listingFilterOptions.Limit, listingFilterOptions.Offset) + + finalQuery := selectQuery + commonQuery + GroupByQuery + limitOffSetQuery + 
return finalQuery +} + +func BuildQueryForArtifactsForRollback(listingFilterOptions bean.ArtifactsListFilterOptions) string { + commonQuery := " FROM cd_workflow_runner cdwr " + + " INNER JOIN cd_workflow cdw ON cdw.id=cdwr.cd_workflow_id " + + " INNER JOIN ci_artifact cia ON cia.id=cdw.ci_artifact_id " + + " WHERE cdw.pipeline_id=%v AND cdwr.workflow_type = '%v' AND cia.image LIKE '%v'" + commonQuery = fmt.Sprintf(commonQuery, listingFilterOptions.PipelineId, listingFilterOptions.StageType, listingFilterOptions.SearchString) + if len(listingFilterOptions.ExcludeArtifactIds) > 0 { + commonQuery = fmt.Sprintf(" %s AND cd_workflow__ci_artifact.id NOT IN (%s)", commonQuery, pg.In(listingFilterOptions.ExcludeArtifactIds)) + } + totalCountQuery := " SELECT COUNT(cia.id) as total_count " + commonQuery + orderByQuery := " ORDER BY cdwr.id DESC " + limitOffsetQuery := fmt.Sprintf("LIMIT %v OFFSET %v", listingFilterOptions.Limit, listingFilterOptions.Offset) + finalQuery := fmt.Sprintf(" SELECT cdwr.id as cd_workflow_runner_id,cdwr.triggered_by,cdwr.started_on,cia.*,(%s) ", totalCountQuery) + commonQuery + orderByQuery + limitOffsetQuery + return finalQuery +} diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 18e56c0629..3ed5346154 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -70,8 +70,6 @@ type CdWorkflowRepository interface { ExistsByStatus(status string) (bool, error) FetchArtifactsByCdPipelineId(pipelineId int, runnerType bean.WorkflowType, offset, limit int, searchString string) ([]CdWorkflowRunner, error) - FetchArtifactsByCdPipelineIdV2(listingFilterOptions bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, int, error) - GetLatestTriggersOfHelmPipelinesStuckInNonTerminalStatuses(getPipelineDeployedWithinHours int) ([]*CdWorkflowRunner, error) } @@ -145,6 +143,11 
@@ type WorkflowExecutorType string const WORKFLOW_EXECUTOR_TYPE_AWF = "AWF" const WORKFLOW_EXECUTOR_TYPE_SYSTEM = "SYSTEM" +type CdWorkflowRunnerWithExtraFields struct { + CdWorkflowRunner + TotalCount int +} + type CdWorkflowRunner struct { tableName struct{} `sql:"cd_workflow_runner" pg:",discard_unknown_columns"` Id int `sql:"id,pk"` @@ -378,44 +381,22 @@ func (impl *CdWorkflowRepositoryImpl) FindCdWorkflowMetaByPipelineId(pipelineId } return wfrList, err } -func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, int, error) { - var wfrList []CdWorkflowRunner - //TODO Gireesh: why are we extracting artifacts which belongs to current pipeline as it will impact page size of response ?? - query := impl.dbConnection.Model(&wfrList). - ColumnExpr("MAX(cd_workflow_runner.id) AS id"). - Join("INNER JOIN cd_workflow ON cd_workflow.id=cd_workflow_runner.cd_workflow_id"). - Join("INNER JOIN ci_artifact cia ON cia.id = cd_workflow.ci_artifact_id"). - Where("(cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ?) "+ - "OR (cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ? AND cd_workflow_runner.status IN (?))", - listingFilterOptions.PipelineId, - listingFilterOptions.StageType, - listingFilterOptions.ParentId, - listingFilterOptions.ParentStageType, - pg.In([]string{application.Healthy, application.SUCCEEDED})). - Where("cia.image LIKE ?", listingFilterOptions.SearchString) - if len(listingFilterOptions.ExcludeArtifactIds) > 0 { - query = query.Where("cd_workflow.ci_artifact_id NOT IN (?)", pg.In(listingFilterOptions.ExcludeArtifactIds)) - } - query = query.Group("cd_workflow.ci_artifact_id") - totalCount, err := query.Count() - if err == pg.ErrNoRows { - return wfrList, totalCount, err - } - - query = query. - Limit(listingFilterOptions.Limit). 
- Offset(listingFilterOptions.Offset) +func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, int, error) { - err = query.Select() - if err == pg.ErrNoRows || len(wfrList) == 0 { + var wfrListResp []CdWorkflowRunnerWithExtraFields + var wfrList []CdWorkflowRunner + totalCount := 0 + finalQuery := repository.BuildQueryForArtifactsForCdStage(*listingFilterOptions) + _, err := impl.dbConnection.Query(&wfrListResp, finalQuery) + if err == pg.ErrNoRows || len(wfrListResp) == 0 { return wfrList, totalCount, nil } - wfIds := make([]int, len(wfrList)) - for i, wf := range wfrList { + wfIds := make([]int, len(wfrListResp)) + for i, wf := range wfrListResp { wfIds[i] = wf.Id + totalCount = wf.TotalCount } - wfrList = make([]CdWorkflowRunner, 0) err = impl.dbConnection. Model(&wfrList). @@ -636,36 +617,6 @@ func (impl *CdWorkflowRepositoryImpl) FetchArtifactsByCdPipelineId(pipelineId in return wfrList, err } -func (impl *CdWorkflowRepositoryImpl) FetchArtifactsByCdPipelineIdV2(listingFilterOptions bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, int, error) { - var wfrList []CdWorkflowRunner - query := impl.dbConnection. - Model(&wfrList). - Column("cd_workflow_runner.*", "CdWorkflow", "CdWorkflow.Pipeline", "CdWorkflow.CiArtifact"). - Where("cd_workflow.pipeline_id = ?", listingFilterOptions.PipelineId). - Where("cd_workflow_runner.workflow_type = ?", listingFilterOptions.StageType). 
- Where("cd_workflow__ci_artifact.image LIKE ?", listingFilterOptions.SearchString) - - if len(listingFilterOptions.ExcludeArtifactIds) > 0 { - query = query.Where("cd_workflow__ci_artifact.id NOT IN (?)", pg.In(listingFilterOptions.ExcludeArtifactIds)) - } - totalCount, err := query.Count() - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in getting Wfrs count and ci artifacts by pipelineId", "err", err, "pipelineId", listingFilterOptions.PipelineId) - return nil, totalCount, err - } - - query = query.Order("cd_workflow_runner.id DESC"). - Limit(listingFilterOptions.Limit). - Offset(listingFilterOptions.Offset) - - err = query.Select() - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in getting Wfrs and ci artifacts by pipelineId", "err", err, "pipelineId", listingFilterOptions.PipelineId) - return nil, totalCount, err - } - return wfrList, totalCount, nil -} - func (impl *CdWorkflowRepositoryImpl) GetLatestTriggersOfHelmPipelinesStuckInNonTerminalStatuses(getPipelineDeployedWithinHours int) ([]*CdWorkflowRunner, error) { var wfrList []*CdWorkflowRunner err := impl.dbConnection. 
diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 23adc0932d..e7d46a2484 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -313,19 +313,20 @@ func (impl *AppArtifactManagerImpl) FetchArtifactForRollbackV2(cdPipelineId, app func (impl *AppArtifactManagerImpl) BuildRollbackArtifactsList(artifactListingFilterOpts bean.ArtifactsListFilterOptions) ([]bean2.CiArtifactBean, []int, int, error) { var deployedCiArtifacts []bean2.CiArtifactBean + totalCount := 0 //1)get current deployed artifact on this pipeline latestWf, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(artifactListingFilterOpts.PipelineId, artifactListingFilterOpts.StageType, 1) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting latest workflow by pipelineId", "pipelineId", artifactListingFilterOpts.PipelineId, "currentStageType", artifactListingFilterOpts.StageType) - return deployedCiArtifacts, nil, 0, err + return deployedCiArtifacts, nil, totalCount, err } if len(latestWf) > 0 { //we should never show current deployed artifact in rollback API artifactListingFilterOpts.ExcludeArtifactIds = []int{latestWf[0].CdWorkflow.CiArtifactId} } - cdWfrs, totalCount, err := impl.cdWorkflowRepository.FetchArtifactsByCdPipelineIdV2(artifactListingFilterOpts) + ciArtifacts, totalCount, err := impl.ciArtifactRepository.FetchArtifactsByCdPipelineIdV2(artifactListingFilterOpts) if err != nil { impl.logger.Errorw("error in getting artifacts for rollback by cdPipelineId", "err", err, "cdPipelineId", artifactListingFilterOpts.PipelineId) @@ -333,7 +334,7 @@ func (impl *AppArtifactManagerImpl) BuildRollbackArtifactsList(artifactListingFi } var ids []int32 - for _, item := range cdWfrs { + for _, item := range ciArtifacts { ids = append(ids, item.TriggeredBy) } @@ -348,26 +349,19 @@ func (impl *AppArtifactManagerImpl) BuildRollbackArtifactsList(artifactListingFi artifactIds := make([]int, 0) - for _, 
cdWfr := range cdWfrs { - ciArtifact := &repository.CiArtifact{} - if cdWfr.CdWorkflow != nil && cdWfr.CdWorkflow.CiArtifact != nil { - ciArtifact = cdWfr.CdWorkflow.CiArtifact - } - if ciArtifact == nil { - continue - } + for _, ciArtifact := range ciArtifacts { mInfo, err := parseMaterialInfo([]byte(ciArtifact.MaterialInfo), ciArtifact.DataSource) if err != nil { mInfo = []byte("[]") impl.logger.Errorw("error in parsing ciArtifact material info", "err", err, "ciArtifact", ciArtifact) } - userEmail := userEmails[cdWfr.TriggeredBy] + userEmail := userEmails[ciArtifact.TriggeredBy] deployedCiArtifacts = append(deployedCiArtifacts, bean2.CiArtifactBean{ Id: ciArtifact.Id, Image: ciArtifact.Image, MaterialInfo: mInfo, - DeployedTime: formatDate(cdWfr.StartedOn, bean2.LayoutRFC3339), - WfrId: cdWfr.Id, + DeployedTime: formatDate(ciArtifact.StartedOn, bean2.LayoutRFC3339), + WfrId: ciArtifact.CdWorkflowRunnerId, DeployedBy: userEmail, }) artifactIds = append(artifactIds, ciArtifact.Id) From f5a9425d571cb7234ebb356c40e18641e90cddbd Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Thu, 2 Nov 2023 02:56:17 +0530 Subject: [PATCH 091/143] sync oss code --- .../sql/repository/CiArtifactRepository.go | 28 ++++++++++++++++ .../CiArtifactsListingQueryBuilder.go | 4 +-- .../pipelineConfig/CdWorfkflowRepository.go | 29 ----------------- pkg/pipeline/AppArtifactManager.go | 32 +++++++++---------- 4 files changed, 45 insertions(+), 48 deletions(-) diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 5fe8df0110..d670f5ee29 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -82,6 +82,7 @@ type CiArtifactRepository interface { GetArtifactByCdWorkflowId(cdWorkflowId int) (artifact *CiArtifact, err error) GetArtifactsByParentCiWorkflowId(parentCiWorkflowId int) ([]string, error) FetchArtifactsByCdPipelineIdV2(listingFilterOptions 
bean.ArtifactsListFilterOptions) ([]CiArtifactWithExtraData, int, error) + FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CiArtifact, int, error) } type CiArtifactRepositoryImpl struct { @@ -655,6 +656,33 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByParentCiWorkflowId(parentCiWo return artifacts, err } +func (impl CiArtifactRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CiArtifact, int, error) { + + var ciArtifactsResp []CiArtifactWithExtraData + var ciArtifacts []CiArtifact + totalCount := 0 + finalQuery := BuildQueryForArtifactsForCdStage(*listingFilterOptions) + _, err := impl.dbConnection.Query(&ciArtifactsResp, finalQuery) + if err == pg.ErrNoRows || len(ciArtifactsResp) == 0 { + return ciArtifacts, totalCount, nil + } + artifactIds := make([]int, len(ciArtifactsResp)) + for i, af := range ciArtifactsResp { + artifactIds[i] = af.Id + totalCount = af.TotalCount + } + + err = impl.dbConnection. + Model(&ciArtifacts). + Where("id IN (?) ", pg.In(artifactIds)). 
+ Select() + + if err == pg.ErrNoRows { + return ciArtifacts, totalCount, nil + } + return ciArtifacts, totalCount, err +} + func (impl CiArtifactRepositoryImpl) FetchArtifactsByCdPipelineIdV2(listingFilterOptions bean.ArtifactsListFilterOptions) ([]CiArtifactWithExtraData, int, error) { var wfrList []CiArtifactWithExtraData totalCount := 0 diff --git a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go index 1f7d000492..ba0604232b 100644 --- a/internal/sql/repository/CiArtifactsListingQueryBuilder.go +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -58,8 +58,8 @@ func BuildQueryForArtifactsForCdStage(listingFilterOptions bean.ArtifactsListFil } totalCountQuery := "SELECT COUNT(DISTINCT ci_artifact_id) as total_count " + commonQuery - selectQuery := fmt.Sprintf("SELECT MAX(cd_workflow_runner.id) AS id, (%v) ", totalCountQuery) - GroupByQuery := " GROUP BY cd_workflow.ci_artifact_id " + selectQuery := fmt.Sprintf("SELECT cia.id , (%v) ", totalCountQuery) + GroupByQuery := " GROUP BY cia.id " limitOffSetQuery := fmt.Sprintf(" LIMIT %v OFFSET %v", listingFilterOptions.Limit, listingFilterOptions.Offset) finalQuery := selectQuery + commonQuery + GroupByQuery + limitOffSetQuery diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 3ed5346154..95e85e377c 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -41,7 +41,6 @@ type CdWorkflowRepository interface { FindCdWorkflowMetaByEnvironmentId(appId int, environmentId int, offset int, size int) ([]CdWorkflowRunner, error) FindCdWorkflowMetaByPipelineId(pipelineId int, offset int, size int) ([]CdWorkflowRunner, error) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) - 
FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, int, error) SaveWorkFlowRunner(wfr *CdWorkflowRunner) (*CdWorkflowRunner, error) UpdateWorkFlowRunner(wfr *CdWorkflowRunner) error UpdateWorkFlowRunnersWithTxn(wfrs []*CdWorkflowRunner, tx *pg.Tx) error @@ -382,34 +381,6 @@ func (impl *CdWorkflowRepositoryImpl) FindCdWorkflowMetaByPipelineId(pipelineId return wfrList, err } -func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CdWorkflowRunner, int, error) { - - var wfrListResp []CdWorkflowRunnerWithExtraFields - var wfrList []CdWorkflowRunner - totalCount := 0 - finalQuery := repository.BuildQueryForArtifactsForCdStage(*listingFilterOptions) - _, err := impl.dbConnection.Query(&wfrListResp, finalQuery) - if err == pg.ErrNoRows || len(wfrListResp) == 0 { - return wfrList, totalCount, nil - } - wfIds := make([]int, len(wfrListResp)) - for i, wf := range wfrListResp { - wfIds[i] = wf.Id - totalCount = wf.TotalCount - } - - err = impl.dbConnection. - Model(&wfrList). - Column("cd_workflow_runner.*", "CdWorkflow", "CdWorkflow.Pipeline", "CdWorkflow.CiArtifact"). - Where("cd_workflow_runner.id IN (?) ", pg.In(wfIds)). - Select() - - if err == pg.ErrNoRows { - return wfrList, totalCount, nil - } - return wfrList, totalCount, err -} - func (impl *CdWorkflowRepositoryImpl) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) { var wfrList []CdWorkflowRunner err := impl.dbConnection. 
diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index e7d46a2484..b61f99e08c 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -701,12 +701,11 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A } func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, int, error) { - cdWfrList, totalCount, err := impl.cdWorkflowRepository.FindArtifactByListFilter(listingFilterOpts) + cdWfrList, totalCount, err := impl.ciArtifactRepository.FindArtifactByListFilter(listingFilterOpts) if err != nil { impl.logger.Errorw("error in fetching cd workflow runners using filter", "filterOptions", listingFilterOpts, "err", err) return nil, totalCount, err } - //TODO Gireesh: initialized array with size but are using append, not optimized solution ciArtifacts := make([]*bean2.CiArtifactBean, 0, len(cdWfrList)) //get artifact running on parent cd @@ -722,28 +721,27 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts } for _, wfr := range cdWfrList { - //TODO Gireesh: Refactoring needed - mInfo, err := parseMaterialInfo([]byte(wfr.CdWorkflow.CiArtifact.MaterialInfo), wfr.CdWorkflow.CiArtifact.DataSource) + mInfo, err := parseMaterialInfo([]byte(wfr.MaterialInfo), wfr.DataSource) if err != nil { mInfo = []byte("[]") impl.logger.Errorw("Error in parsing artifact material info", "err", err) } ciArtifact := &bean2.CiArtifactBean{ - Id: wfr.CdWorkflow.CiArtifact.Id, - Image: wfr.CdWorkflow.CiArtifact.Image, - ImageDigest: wfr.CdWorkflow.CiArtifact.ImageDigest, + Id: wfr.Id, + Image: wfr.Image, + ImageDigest: wfr.ImageDigest, MaterialInfo: mInfo, //TODO:LastSuccessfulTriggerOnParent - Scanned: wfr.CdWorkflow.CiArtifact.Scanned, - ScanEnabled: wfr.CdWorkflow.CiArtifact.ScanEnabled, - RunningOnParentCd: wfr.CdWorkflow.CiArtifact.Id == artifactRunningOnParentCd, - 
ExternalCiPipelineId: wfr.CdWorkflow.CiArtifact.ExternalCiPipelineId, - ParentCiArtifact: wfr.CdWorkflow.CiArtifact.ParentCiArtifact, - CreatedTime: formatDate(wfr.CdWorkflow.CiArtifact.CreatedOn, bean2.LayoutRFC3339), - DataSource: wfr.CdWorkflow.CiArtifact.DataSource, - } - if wfr.CdWorkflow.CiArtifact.WorkflowId != nil { - ciArtifact.CiWorkflowId = *wfr.CdWorkflow.CiArtifact.WorkflowId + Scanned: wfr.Scanned, + ScanEnabled: wfr.ScanEnabled, + RunningOnParentCd: wfr.Id == artifactRunningOnParentCd, + ExternalCiPipelineId: wfr.ExternalCiPipelineId, + ParentCiArtifact: wfr.ParentCiArtifact, + CreatedTime: formatDate(wfr.CreatedOn, bean2.LayoutRFC3339), + DataSource: wfr.DataSource, + } + if wfr.WorkflowId != nil { + ciArtifact.CiWorkflowId = *wfr.WorkflowId } ciArtifacts = append(ciArtifacts, ciArtifact) } From 851c7f4b11f76ce3d35c012f028f9e3f11f13858 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Thu, 2 Nov 2023 03:57:47 +0530 Subject: [PATCH 092/143] optimise getting git-triggers logic --- .../pipelineConfig/CiWorkflowRepository.go | 17 ++-- pkg/pipeline/AppArtifactManager.go | 99 +++++++++++++------ 2 files changed, 78 insertions(+), 38 deletions(-) diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index c10e22737b..d4de6c0319 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -19,6 +19,7 @@ package pipelineConfig import ( "fmt" + "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/go-pg/pg" "go.uber.org/zap" "time" @@ -43,7 +44,7 @@ type CiWorkflowRepository interface { FindLastTriggeredWorkflowByArtifactId(ciArtifactId int) (ciWorkflow *CiWorkflow, err error) FindAllLastTriggeredWorkflowByArtifactId(ciArtifactId []int) (ciWorkflow []*CiWorkflow, err error) 
FindLastTriggeredWorkflowGitTriggersByArtifactId(ciArtifactId int) (ciWorkflow *CiWorkflow, err error) - FindLastTriggeredWorkflowGitTriggersByArtifactIds(ciArtifactIds []int) ([]*CiWorkflow, error) + FindLastTriggeredWorkflowGitTriggersByArtifactIds(ciArtifactIds []int) ([]*WorkflowWithArtifact, error) ExistsByStatus(status string) (bool, error) FindBuildTypeAndStatusDataOfLast1Day() []*BuildTypeCount FIndCiWorkflowStatusesByAppId(appId int) ([]*CiWorkflowStatus, error) @@ -292,16 +293,16 @@ func (impl *CiWorkflowRepositoryImpl) FindLastTriggeredWorkflowGitTriggersByArti return workflow, err } -func (impl *CiWorkflowRepositoryImpl) FindLastTriggeredWorkflowGitTriggersByArtifactIds(ciArtifactIds []int) ([]*CiWorkflow, error) { - workflows := make([]*CiWorkflow, 0) +func (impl *CiWorkflowRepositoryImpl) FindLastTriggeredWorkflowGitTriggersByArtifactIds(ciArtifactIds []int) ([]*WorkflowWithArtifact, error) { + workflows := make([]*WorkflowWithArtifact, 0) if len(ciArtifactIds) == 0 { return workflows, nil } - err := impl.dbConnection.Model(&workflows). - Column("ci_workflow.git_triggers"). - Join("inner join ci_artifact cia on cia.ci_workflow_id = ci_workflow.id"). - Where("cia.id IN (?)", pg.In(ciArtifactIds)). 
- Select() + query := "SELECT cw.git_triggers,cw.id,cw.triggered_by,cw.ci_pipeline_id,cia.id as ci_artifact_id" + + " FROM ci_workflow cw INNER JOIN ci_artifact cia on cia.ci_workflow_id = ci_workflow.id " + + " WHERE cia.id IN (%s)" + query = fmt.Sprintf(query, helper.GetCommaSepratedString(ciArtifactIds)) + _, err := impl.dbConnection.Query(&workflows, query) return workflows, err } diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index b61f99e08c..5778a9e0f3 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -565,8 +565,24 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi }) } - //TODO Gireesh: need to check this behaviour, can we use this instead of below loop ?? - //artifactIds := lo.FlatMap(ciArtifacts, func(artifact bean2.CiArtifactBean, _ int) []int { return []int{artifact.Id} }) + ciArtifacts, err = impl.setAdditionalDataInArtifacts(ciArtifacts, pipeline) + if err != nil { + impl.logger.Errorw("error in setting additional data in fetched artifacts", "pipelineId", pipeline.Id, "err", err) + return ciArtifactsResponse, err + } + + ciArtifactsResponse.CdPipelineId = pipeline.Id + ciArtifactsResponse.LatestWfArtifactId = latestWfArtifactId + ciArtifactsResponse.LatestWfArtifactStatus = latestWfArtifactStatus + if ciArtifacts == nil { + ciArtifacts = []bean2.CiArtifactBean{} + } + ciArtifactsResponse.CiArtifacts = ciArtifacts + ciArtifactsResponse.TotalCount = totalCount + return ciArtifactsResponse, nil +} + +func (impl *AppArtifactManagerImpl) setAdditionalDataInArtifacts(ciArtifacts []bean2.CiArtifactBean, pipeline *pipelineConfig.Pipeline) ([]bean2.CiArtifactBean, error) { artifactIds := make([]int, 0, len(ciArtifacts)) for _, artifact := range ciArtifacts { artifactIds = append(artifactIds, artifact.Id) @@ -575,57 +591,80 @@ func (impl *AppArtifactManagerImpl) RetrieveArtifactsByCDPipelineV2(pipeline *pi imageTagsDataMap, err := 
impl.imageTaggingService.GetTagsDataMapByAppId(pipeline.AppId) if err != nil { impl.logger.Errorw("error in getting image tagging data with appId", "err", err, "appId", pipeline.AppId) - return ciArtifactsResponse, err + return ciArtifacts, err } imageCommentsDataMap, err := impl.imageTaggingService.GetImageCommentsDataMapByArtifactIds(artifactIds) if err != nil { impl.logger.Errorw("error in getting GetImageCommentsDataMapByArtifactIds", "err", err, "appId", pipeline.AppId, "artifactIds", artifactIds) - return ciArtifactsResponse, err + return ciArtifacts, err } - //TODO Gireesh: Create a meaningful func - for i, artifact := range ciArtifacts { - if imageTaggingResp := imageTagsDataMap[ciArtifacts[i].Id]; imageTaggingResp != nil { + for i, _ := range ciArtifacts { + imageTaggingResp := imageTagsDataMap[ciArtifacts[i].Id] + if imageTaggingResp != nil { ciArtifacts[i].ImageReleaseTags = imageTaggingResp } if imageCommentResp := imageCommentsDataMap[ciArtifacts[i].Id]; imageCommentResp != nil { ciArtifacts[i].ImageComment = imageCommentResp } + } + return impl.setGitTriggerData(ciArtifacts) +} + +func (impl *AppArtifactManagerImpl) setGitTriggerData(ciArtifacts []bean2.CiArtifactBean) ([]bean2.CiArtifactBean, error) { + directArtifactIndexes, directWorkflowIds, artifactsWithParentIndexes, parentArtifactIds := make([]int, 0), make([]int, 0), make([]int, 0), make([]int, 0) + for i, artifact := range ciArtifacts { if artifact.ExternalCiPipelineId != 0 { // if external webhook continue continue } - - //TODO: can be optimised - var ciWorkflow *pipelineConfig.CiWorkflow + //linked ci case if artifact.ParentCiArtifact != 0 { - ciWorkflow, err = impl.ciWorkflowRepository.FindLastTriggeredWorkflowGitTriggersByArtifactId(artifact.ParentCiArtifact) - if err != nil { - impl.logger.Errorw("error in getting ci_workflow for artifacts", "err", err, "artifact", artifact, "parentStage", parentType, "stage", stage) - return ciArtifactsResponse, err - } - + artifactsWithParentIndexes = 
append(artifactsWithParentIndexes, i) + parentArtifactIds = append(parentArtifactIds, artifact.ParentCiArtifact) } else { - ciWorkflow, err = impl.ciWorkflowRepository.FindCiWorkflowGitTriggersById(artifact.CiWorkflowId) - if err != nil { - impl.logger.Errorw("error in getting ci_workflow for artifacts", "err", err, "artifact", artifact, "parentStage", parentType, "stage", stage) - return ciArtifactsResponse, err - } + directArtifactIndexes = append(directArtifactIndexes, i) + directWorkflowIds = append(directWorkflowIds, artifact.CiWorkflowId) } - ciArtifacts[i].CiConfigureSourceType = ciWorkflow.GitTriggers[ciWorkflow.CiPipelineId].CiConfigureSourceType + } + ciWorkflowWithArtifacts, err := impl.ciWorkflowRepository.FindLastTriggeredWorkflowGitTriggersByArtifactIds(parentArtifactIds) + if err != nil { + impl.logger.Errorw("error in getting ci_workflow for artifacts", "err", err, "parentArtifactIds", parentArtifactIds) + return ciArtifacts, err } - ciArtifactsResponse.CdPipelineId = pipeline.Id - ciArtifactsResponse.TotalCount = totalCount - ciArtifactsResponse.LatestWfArtifactId = latestWfArtifactId - ciArtifactsResponse.LatestWfArtifactStatus = latestWfArtifactStatus - if ciArtifacts == nil { - ciArtifacts = []bean2.CiArtifactBean{} + parentArtifactIdVsCiWorkflowMap := make(map[int]*pipelineConfig.WorkflowWithArtifact) + for _, ciWorkflow := range ciWorkflowWithArtifacts { + parentArtifactIdVsCiWorkflowMap[ciWorkflow.CiArtifactId] = ciWorkflow } - ciArtifactsResponse.CiArtifacts = ciArtifacts - return ciArtifactsResponse, nil + + for _, index := range directArtifactIndexes { + ciWorkflow := parentArtifactIdVsCiWorkflowMap[ciArtifacts[index].CiWorkflowId] + if ciWorkflow != nil { + ciArtifacts[index].CiConfigureSourceType = ciWorkflow.GitTriggers[ciWorkflow.CiPipelineId].CiConfigureSourceType + ciArtifacts[index].CiConfigureSourceValue = ciWorkflow.GitTriggers[ciWorkflow.CiPipelineId].CiConfigureSourceValue + } + } + + ciWorkflows, err := 
impl.ciWorkflowRepository.FindCiWorkflowGitTriggersByIds(directWorkflowIds) + if err != nil { + impl.logger.Errorw("error in getting ci_workflow for artifacts", "err", err, "ciWorkflowIds", directWorkflowIds) + return ciArtifacts, err + } + ciWorkflowMap := make(map[int]*pipelineConfig.CiWorkflow) + for _, ciWorkflow := range ciWorkflows { + ciWorkflowMap[ciWorkflow.Id] = ciWorkflow + } + for _, index := range directArtifactIndexes { + ciWorkflow := ciWorkflowMap[ciArtifacts[index].CiWorkflowId] + if ciWorkflow != nil { + ciArtifacts[index].CiConfigureSourceType = ciWorkflow.GitTriggers[ciWorkflow.CiPipelineId].CiConfigureSourceType + ciArtifacts[index].CiConfigureSourceValue = ciWorkflow.GitTriggers[ciWorkflow.CiPipelineId].CiConfigureSourceValue + } + } + return ciArtifacts, nil } func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*bean2.CiArtifactBean, int, string, int, error) { From b2fbcfbd6b6403d200ee89cbfd7dc0fb28701435 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Thu, 2 Nov 2023 04:01:09 +0530 Subject: [PATCH 093/143] fix --- internal/sql/repository/pipelineConfig/CiWorkflowRepository.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index d4de6c0319..d78fd8f929 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -299,7 +299,7 @@ func (impl *CiWorkflowRepositoryImpl) FindLastTriggeredWorkflowGitTriggersByArti return workflows, nil } query := "SELECT cw.git_triggers,cw.id,cw.triggered_by,cw.ci_pipeline_id,cia.id as ci_artifact_id" + - " FROM ci_workflow cw INNER JOIN ci_artifact cia on cia.ci_workflow_id = ci_workflow.id " + + " FROM ci_workflow cw INNER JOIN ci_artifact cia on cia.ci_workflow_id = cw.id " + " WHERE cia.id IN (%s)" query = 
fmt.Sprintf(query, helper.GetCommaSepratedString(ciArtifactIds)) _, err := impl.dbConnection.Query(&workflows, query) From 0b39d41b10e21d902ef653cca488cb580ed13e4a Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 2 Nov 2023 11:24:29 +0530 Subject: [PATCH 094/143] post ci fixes --- api/router/pubsub/CiEventHandler.go | 48 +++---- .../sql/repository/CiArtifactRepository.go | 2 + pkg/pipeline/CiService.go | 120 +++++++++++++----- pkg/pipeline/CustomTagService.go | 51 ++++++++ pkg/pipeline/WebhookService.go | 62 ++++++--- pkg/pipeline/WorkflowDagExecutor.go | 36 +++++- pkg/pipeline/WorkflowUtils.go | 10 +- pkg/pipeline/pipelineStageVariableParser.go | 36 +++--- wire_gen.go | 4 +- 9 files changed, 269 insertions(+), 100 deletions(-) diff --git a/api/router/pubsub/CiEventHandler.go b/api/router/pubsub/CiEventHandler.go index cf0e1fa030..1766d9be09 100644 --- a/api/router/pubsub/CiEventHandler.go +++ b/api/router/pubsub/CiEventHandler.go @@ -61,20 +61,22 @@ type ImageDetailsFromCR struct { } type CiCompleteEvent struct { - CiProjectDetails []bean2.CiProjectDetails `json:"ciProjectDetails"` - DockerImage string `json:"dockerImage" validate:"required,image-validator"` - Digest string `json:"digest"` - PipelineId int `json:"pipelineId"` - WorkflowId *int `json:"workflowId"` - TriggeredBy int32 `json:"triggeredBy"` - PipelineName string `json:"pipelineName"` - DataSource string `json:"dataSource"` - MaterialType string `json:"materialType"` - Metrics util.CIMetrics `json:"metrics"` - AppName string `json:"appName"` - IsArtifactUploaded bool `json:"isArtifactUploaded"` - FailureReason string `json:"failureReason"` - ImageDetailsFromCR *ImageDetailsFromCR `json:"imageDetailsFromCR"` + CiProjectDetails []bean2.CiProjectDetails `json:"ciProjectDetails"` + DockerImage string `json:"dockerImage" validate:"required,image-validator"` + Digest string `json:"digest"` + PipelineId int `json:"pipelineId"` + WorkflowId *int `json:"workflowId"` + TriggeredBy int32 
`json:"triggeredBy"` + PipelineName string `json:"pipelineName"` + DataSource string `json:"dataSource"` + MaterialType string `json:"materialType"` + Metrics util.CIMetrics `json:"metrics"` + AppName string `json:"appName"` + IsArtifactUploaded bool `json:"isArtifactUploaded"` + FailureReason string `json:"failureReason"` + ImageDetailsFromCR *ImageDetailsFromCR `json:"imageDetailsFromCR"` + PluginRegistryArtifactDetails map[string][]string `json:"PluginRegistryArtifactDetails"` + PluginArtifactStage string `json:"pluginArtifactStage"` } func NewCiEventHandlerImpl(logger *zap.SugaredLogger, pubsubClient *pubsub.PubSubClientServiceImpl, webhookService pipeline.WebhookService, ciEventConfig *CiEventConfig) *CiEventHandlerImpl { @@ -214,14 +216,16 @@ func (impl *CiEventHandlerImpl) BuildCiArtifactRequest(event CiCompleteEvent) (* } request := &pipeline.CiArtifactWebhookRequest{ - Image: event.DockerImage, - ImageDigest: event.Digest, - DataSource: event.DataSource, - PipelineName: event.PipelineName, - MaterialInfo: rawMaterialInfo, - UserId: event.TriggeredBy, - WorkflowId: event.WorkflowId, - IsArtifactUploaded: event.IsArtifactUploaded, + Image: event.DockerImage, + ImageDigest: event.Digest, + DataSource: event.DataSource, + PipelineName: event.PipelineName, + MaterialInfo: rawMaterialInfo, + UserId: event.TriggeredBy, + WorkflowId: event.WorkflowId, + IsArtifactUploaded: event.IsArtifactUploaded, + PluginRegistryArtifactDetails: event.PluginRegistryArtifactDetails, + PluginArtifactStage: event.PluginArtifactStage, } return request, nil } diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 312e3f030b..eee84cfc02 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -42,6 +42,8 @@ const ( WEBHOOK artifactsSourceType = "ext" PRE_CD artifactsSourceType = "pre_cd" POST_CD artifactsSourceType = "post_cd" + PRE_CI artifactsSourceType = 
"pre_ci" + POST_CI artifactsSourceType = "post_ci" ) type CiArtifact struct { diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index a8e58881ca..c4aaf1cb29 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -21,6 +21,7 @@ import ( "encoding/json" "errors" "fmt" + repository5 "github.com/devtron-labs/devtron/internal/sql/repository" appRepository "github.com/devtron-labs/devtron/internal/sql/repository/app" repository3 "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/helper" @@ -29,6 +30,7 @@ import ( bean2 "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/devtron-labs/devtron/pkg/pipeline/history" "github.com/devtron-labs/devtron/pkg/pipeline/repository" + "github.com/devtron-labs/devtron/pkg/plugin" repository2 "github.com/devtron-labs/devtron/pkg/plugin/repository" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/user" @@ -75,6 +77,8 @@ type CiServiceImpl struct { customTagService CustomTagService variableSnapshotHistoryService variables.VariableSnapshotHistoryService config *CiConfig + pluginInputVariableParser PluginInputVariableParser + globalPluginService plugin.GlobalPluginService } func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService, @@ -87,6 +91,8 @@ func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService ciTemplateService CiTemplateService, appCrudOperationService app.AppCrudOperationService, envRepository repository1.EnvironmentRepository, appRepository appRepository.AppRepository, variableSnapshotHistoryService variables.VariableSnapshotHistoryService, customTagService CustomTagService, + pluginInputVariableParser 
PluginInputVariableParser, + globalPluginService plugin.GlobalPluginService, ) *CiServiceImpl { cis := &CiServiceImpl{ Logger: Logger, @@ -106,6 +112,8 @@ func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService appRepository: appRepository, variableSnapshotHistoryService: variableSnapshotHistoryService, customTagService: customTagService, + pluginInputVariableParser: pluginInputVariableParser, + globalPluginService: globalPluginService, } config, err := GetCiConfig() if err != nil { @@ -479,6 +487,11 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. } else { dockerImageTag = impl.buildImageTag(commitHashes, pipeline.Id, savedWf.Id) } + registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, err := impl.GetEnvVariablesForSkopeoPlugin(preCiSteps, postCiSteps, customTag, fmt.Sprintf(bean2.ImagePathPattern, pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository, dockerImageTag), pipeline.CiTemplate.DockerRegistry.Id) + if err != nil { + impl.Logger.Errorw("error in getting env variables for skopeo plugin") + return nil, err + } if ciWorkflowConfig.CiCacheBucket == "" { ciWorkflowConfig.CiCacheBucket = impl.config.DefaultCacheBucket @@ -589,41 +602,44 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
} workflowRequest := &WorkflowRequest{ - WorkflowNamePrefix: strconv.Itoa(savedWf.Id) + "-" + savedWf.Name, - PipelineName: pipeline.Name, - PipelineId: pipeline.Id, - CiCacheFileName: pipeline.Name + "-" + strconv.Itoa(pipeline.Id) + ".tar.gz", - CiProjectDetails: ciProjectDetails, - Namespace: ciWorkflowConfig.Namespace, - BlobStorageConfigured: savedWf.BlobStorageEnabled, - CiImage: ciWorkflowConfig.CiImage, - ActiveDeadlineSeconds: ciWorkflowConfig.CiTimeout, - WorkflowId: savedWf.Id, - TriggeredBy: savedWf.TriggeredBy, - CacheLimit: impl.config.CacheLimit, - ScanEnabled: pipeline.ScanEnabled, - CloudProvider: impl.config.CloudProvider, - DefaultAddressPoolBaseCidr: impl.config.GetDefaultAddressPoolBaseCidr(), - DefaultAddressPoolSize: impl.config.GetDefaultAddressPoolSize(), - PreCiSteps: preCiSteps, - PostCiSteps: postCiSteps, - RefPlugins: refPluginsData, - AppName: pipeline.App.AppName, - TriggerByAuthor: user.EmailId, - CiBuildConfig: ciBuildConfigBean, - CiBuildDockerMtuValue: impl.config.CiRunnerDockerMTUValue, - IgnoreDockerCachePush: impl.config.IgnoreDockerCacheForCI, - IgnoreDockerCachePull: impl.config.IgnoreDockerCacheForCI, - CacheInvalidate: trigger.InvalidateCache, - ExtraEnvironmentVariables: trigger.ExtraEnvironmentVariables, - EnableBuildContext: impl.config.EnableBuildContext, - OrchestratorHost: impl.config.OrchestratorHost, - OrchestratorToken: impl.config.OrchestratorToken, - ImageRetryCount: impl.config.ImageRetryCount, - ImageRetryInterval: impl.config.ImageRetryInterval, - WorkflowExecutor: impl.config.GetWorkflowExecutorType(), - Type: bean2.CI_WORKFLOW_PIPELINE_TYPE, - CiArtifactLastFetch: trigger.CiArtifactLastFetch, + WorkflowNamePrefix: strconv.Itoa(savedWf.Id) + "-" + savedWf.Name, + PipelineName: pipeline.Name, + PipelineId: pipeline.Id, + CiCacheFileName: pipeline.Name + "-" + strconv.Itoa(pipeline.Id) + ".tar.gz", + CiProjectDetails: ciProjectDetails, + Namespace: ciWorkflowConfig.Namespace, + BlobStorageConfigured: 
savedWf.BlobStorageEnabled, + CiImage: ciWorkflowConfig.CiImage, + ActiveDeadlineSeconds: ciWorkflowConfig.CiTimeout, + WorkflowId: savedWf.Id, + TriggeredBy: savedWf.TriggeredBy, + CacheLimit: impl.config.CacheLimit, + ScanEnabled: pipeline.ScanEnabled, + CloudProvider: impl.config.CloudProvider, + DefaultAddressPoolBaseCidr: impl.config.GetDefaultAddressPoolBaseCidr(), + DefaultAddressPoolSize: impl.config.GetDefaultAddressPoolSize(), + PreCiSteps: preCiSteps, + PostCiSteps: postCiSteps, + RefPlugins: refPluginsData, + AppName: pipeline.App.AppName, + TriggerByAuthor: user.EmailId, + CiBuildConfig: ciBuildConfigBean, + CiBuildDockerMtuValue: impl.config.CiRunnerDockerMTUValue, + IgnoreDockerCachePush: impl.config.IgnoreDockerCacheForCI, + IgnoreDockerCachePull: impl.config.IgnoreDockerCacheForCI, + CacheInvalidate: trigger.InvalidateCache, + ExtraEnvironmentVariables: trigger.ExtraEnvironmentVariables, + EnableBuildContext: impl.config.EnableBuildContext, + OrchestratorHost: impl.config.OrchestratorHost, + OrchestratorToken: impl.config.OrchestratorToken, + ImageRetryCount: impl.config.ImageRetryCount, + ImageRetryInterval: impl.config.ImageRetryInterval, + WorkflowExecutor: impl.config.GetWorkflowExecutorType(), + Type: bean2.CI_WORKFLOW_PIPELINE_TYPE, + CiArtifactLastFetch: trigger.CiArtifactLastFetch, + RegistryDestinationImageMap: registryDestinationImageMap, + RegistryCredentialMap: registryCredentialMap, + PluginArtifactStage: pluginArtifactStage, } if dockerRegistry != nil { @@ -703,6 +719,40 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
return workflowRequest, nil } +func (impl *CiServiceImpl) GetEnvVariablesForSkopeoPlugin(preCiSteps []*bean2.StepObject, postCiSteps []*bean2.StepObject, customTag *repository5.CustomTag, buildImagePath string, buildImagedockerRegistryId string) (map[string][]string, map[string]plugin.RegistryCredentials, string, error) { + var registryDestinationImageMap map[string][]string + var registryCredentialMap map[string]plugin.RegistryCredentials + var pluginArtifactStage string + skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) + if err != nil && err != pg.ErrNoRows { + impl.Logger.Errorw("error in getting skopeo plugin id", "err", err) + return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, err + } + for _, step := range preCiSteps { + if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { + // for Skopeo plugin parse destination images and save its data in image path reservation table + registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, buildImagePath, buildImagedockerRegistryId) + if err != nil { + impl.Logger.Errorw("error in parsing skopeo input variable", "err", err) + return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, err + } + pluginArtifactStage = repository5.PRE_CI + } + } + for _, step := range postCiSteps { + if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { + // for Skopeo plugin parse destination images and save its data in image path reservation table + registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, buildImagePath, buildImagedockerRegistryId) + if err != nil { + impl.Logger.Errorw("error in parsing skopeo input variable", "err", err) + return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, err + } + 
pluginArtifactStage = repository5.POST_CI + } + } + return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, nil +} + func buildCiStepsDataFromDockerBuildScripts(dockerBuildScripts []*bean.CiScript) []*bean2.StepObject { //before plugin support, few variables were set as env vars in ci-runner //these variables are now moved to global vars in plugin steps, but to avoid error in old scripts adding those variables in payload diff --git a/pkg/pipeline/CustomTagService.go b/pkg/pipeline/CustomTagService.go index 196c1eba96..2bcf48def5 100644 --- a/pkg/pipeline/CustomTagService.go +++ b/pkg/pipeline/CustomTagService.go @@ -19,6 +19,8 @@ type CustomTagService interface { GenerateImagePath(entityKey int, entityValue string, dockerRegistryURL string, dockerRepo string) (*repository.ImagePathReservation, error) DeleteCustomTagIfExists(tag bean.CustomTag) error DeactivateImagePathReservation(id int) error + GetCustomTag(entityKey int, entityValue string) (*repository.CustomTag, error) + ReserveImagePath(imagePath string, customTagId int) error } type CustomTagServiceImpl struct { @@ -101,6 +103,7 @@ func (impl *CustomTagServiceImpl) GenerateImagePath(entityKey int, entityValue s tag, err := validateAndConstructTag(customTagData) if err != nil { return nil, err + return nil, err } imagePath := fmt.Sprintf(bean2.ImagePathPattern, dockerRegistryURL, dockerRepo, tag) imagePathReservations, err := impl.customTagRepository.FindByImagePath(tx, imagePath) @@ -173,6 +176,54 @@ func isValidDockerImageTag(tag string) bool { return re.MatchString(tag) } +func (impl *CustomTagServiceImpl) GetCustomTag(entityKey int, entityValue string) (*repository.CustomTag, error) { + connection := impl.customTagRepository.GetConnection() + tx, err := connection.Begin() + customTag, err := impl.customTagRepository.IncrementAndFetchByEntityKeyAndValue(tx, entityKey, entityValue) + if err != nil { + return nil, err + } + if err != nil { + return nil, err + } + err = tx.Commit() + if 
err != nil { + impl.Logger.Errorw("Error in fetching custom tag", "err", err) + return customTag, err + } + return customTag, nil + +} + +func (impl *CustomTagServiceImpl) ReserveImagePath(imagePath string, customTagId int) error { + connection := impl.customTagRepository.GetConnection() + tx, err := connection.Begin() + if err != nil { + return nil + } + imagePathReservations, err := impl.customTagRepository.FindByImagePath(tx, imagePath) + if err != nil && err != pg.ErrNoRows { + return nil + } + if len(imagePathReservations) > 0 { + return nil + } + imagePathReservation := &repository.ImagePathReservation{ + ImagePath: imagePath, + CustomTagId: customTagId, + } + err = impl.customTagRepository.InsertImagePath(tx, imagePathReservation) + if err != nil { + return nil + } + err = tx.Commit() + if err != nil { + impl.Logger.Errorw("Error in fetching custom tag", "err", err) + return nil + } + return err +} + func validateTag(imageTag string) error { if len(imageTag) == 0 || len(imageTag) > 128 { return fmt.Errorf("image tag should be of len 1-128 only, imageTag: %s", imageTag) diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index 5e1b6bc18e..8e6a356869 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -40,15 +40,17 @@ import ( ) type CiArtifactWebhookRequest struct { - Image string `json:"image"` - ImageDigest string `json:"imageDigest"` - MaterialInfo json.RawMessage `json:"materialInfo"` - DataSource string `json:"dataSource"` - PipelineName string `json:"pipelineName"` - WorkflowId *int `json:"workflowId"` - UserId int32 `json:"userId"` - IsArtifactUploaded bool `json:"isArtifactUploaded"` - FailureReason string `json:"failureReason"` + Image string `json:"image"` + ImageDigest string `json:"imageDigest"` + MaterialInfo json.RawMessage `json:"materialInfo"` + DataSource string `json:"dataSource"` + PipelineName string `json:"pipelineName"` + WorkflowId *int `json:"workflowId"` + UserId int32 
`json:"userId"` + IsArtifactUploaded bool `json:"isArtifactUploaded"` + FailureReason string `json:"failureReason"` + PluginRegistryArtifactDetails map[string][]string `json:"PluginRegistryArtifactDetails"` //map of registry and array of images generated by skopeo plugin + PluginArtifactStage string `json:"pluginArtifactStage"` // at which stage of CI artifact was generated by plugin ("pre_ci/post_ci") } type WebhookService interface { @@ -199,7 +201,31 @@ func (impl WebhookServiceImpl) HandleCiSuccessEvent(ciPipelineId int, request *C if !imagePushedAt.IsZero() { createdOn = *imagePushedAt } - artifact := &repository.CiArtifact{ + var pluginArtifacts []*repository.CiArtifact + for registry, artifacts := range request.PluginRegistryArtifactDetails { + for _, image := range artifacts { + pluginArtifact := &repository.CiArtifact{ + Image: image, + ImageDigest: request.ImageDigest, + MaterialInfo: "", + DataSource: request.PluginArtifactStage, + ComponentId: pipeline.Id, + PipelineId: pipeline.Id, + AuditLog: sql.AuditLog{CreatedBy: request.UserId, UpdatedBy: request.UserId, CreatedOn: createdOn, UpdatedOn: updatedOn}, + CredentialsSourceType: repository.GLOBAL_CONTAINER_REGISTRY, + CredentialSourceValue: registry, + } + pluginArtifacts = append(pluginArtifacts, pluginArtifact) + } + } + if len(pluginArtifacts) > 0 { + err = impl.ciArtifactRepository.SaveAll(pluginArtifacts) + if err != nil { + impl.logger.Errorw("error while saving ci artifacts", "err", err) + return 0, err + } + } + buildArtifact := &repository.CiArtifact{ Image: request.Image, ImageDigest: request.ImageDigest, MaterialInfo: string(materialJson), @@ -212,9 +238,9 @@ func (impl WebhookServiceImpl) HandleCiSuccessEvent(ciPipelineId int, request *C AuditLog: sql.AuditLog{CreatedBy: request.UserId, UpdatedBy: request.UserId, CreatedOn: createdOn, UpdatedOn: updatedOn}, } if pipeline.ScanEnabled { - artifact.Scanned = true + buildArtifact.Scanned = true } - if err = 
impl.ciArtifactRepository.Save(artifact); err != nil { + if err = impl.ciArtifactRepository.Save(buildArtifact); err != nil { impl.logger.Errorw("error in saving material", "err", err) return 0, err } @@ -233,7 +259,7 @@ func (impl WebhookServiceImpl) HandleCiSuccessEvent(ciPipelineId int, request *C MaterialInfo: string(materialJson), DataSource: request.DataSource, PipelineId: ci.Id, - ParentCiArtifact: artifact.Id, + ParentCiArtifact: buildArtifact.Id, ScanEnabled: ci.ScanEnabled, Scanned: false, IsArtifactUploaded: request.IsArtifactUploaded, @@ -253,8 +279,12 @@ func (impl WebhookServiceImpl) HandleCiSuccessEvent(ciPipelineId int, request *C return 0, err } } - ciArtifactArr = append(ciArtifactArr, artifact) - go impl.WriteCISuccessEvent(request, pipeline, artifact) + if len(pluginArtifacts) == 0 { + ciArtifactArr = append(ciArtifactArr, buildArtifact) + go impl.WriteCISuccessEvent(request, pipeline, buildArtifact) + } else { + ciArtifactArr = append(ciArtifactArr, pluginArtifacts[0]) + } isCiManual := true if request.UserId == 1 { @@ -298,7 +328,7 @@ func (impl WebhookServiceImpl) HandleCiSuccessEvent(ciPipelineId int, request *C i += batchSize } impl.logger.Debugw("Completed: auto trigger for children Stage/CD pipelines", "Time taken", time.Since(start).Seconds()) - return artifact.Id, err + return buildArtifact.Id, err } func (impl WebhookServiceImpl) HandleExternalCiWebhook(externalCiId int, request *CiArtifactWebhookRequest, auth func(token string, projectObject string, envObject string) bool) (id int, err error) { diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index bd850dcd5c..510a5d2516 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -179,6 +179,7 @@ type WorkflowDagExecutorImpl struct { variableTemplateParser parsers.VariableTemplateParser argoClientWrapperService argocdServer.ArgoClientWrapperService scopedVariableService variables.ScopedVariableService + 
customTagService CustomTagService } const kedaAutoscaling = "kedaAutoscaling" @@ -306,6 +307,7 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi variableTemplateParser parsers.VariableTemplateParser, argoClientWrapperService argocdServer.ArgoClientWrapperService, scopedVariableService variables.ScopedVariableService, + customTagService CustomTagService, ) *WorkflowDagExecutorImpl { wde := &WorkflowDagExecutorImpl{logger: Logger, pipelineRepository: pipelineRepository, @@ -378,6 +380,7 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi variableTemplateParser: variableTemplateParser, argoClientWrapperService: argoClientWrapperService, scopedVariableService: scopedVariableService, + customTagService: customTagService, } config, err := GetCdConfig() if err != nil { @@ -443,7 +446,14 @@ func (impl *WorkflowDagExecutorImpl) HandleCiSuccessEvent(artifact *repository.C //1. get cd pipelines //2. get config //3. trigger wf/ deployment - pipelines, err := impl.pipelineRepository.FindByParentCiPipelineId(artifact.PipelineId) + var pipelineID int + if artifact.DataSource == repository.POST_CI || artifact.DataSource == repository.PRE_CI { + pipelineID = artifact.ComponentId + } else { + // TODO: need to migrate artifact.PipelineId for dataSource="CI_RUNNER" also to component_id + pipelineID = artifact.PipelineId + } + pipelines, err := impl.pipelineRepository.FindByParentCiPipelineId(pipelineID) if err != nil { impl.logger.Errorw("error in fetching cd pipeline", "pipelineId", artifact.PipelineId, "err", err) return err @@ -751,16 +761,26 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * cdStageWorkflowRequest.StageType = PRE // handling plugin specific logic skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting skopeo plugin id", "err", err) + return err + } for _, 
step := range cdStageWorkflowRequest.PrePostDeploySteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, bean3.EntityTypePreCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) + customTag, err := impl.customTagService.GetCustomTag(bean3.EntityTypePreCD, strconv.Itoa(pipeline.Id)) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) + return err + } + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err } cdStageWorkflowRequest.RegistryDestinationImageMap = registryDestinationImageMap cdStageWorkflowRequest.RegistryCredentialMap = registryCredentialMap + cdStageWorkflowRequest.PluginArtifactStage = repository.PRE_CD } } _, span = otel.Tracer("orchestrator").Start(ctx, "cdWorkflowService.SubmitWorkflow") @@ -884,16 +904,26 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor cdStageWorkflowRequest.Type = bean3.CD_WORKFLOW_PIPELINE_TYPE // handling plugin specific logic skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting skopeo plugin id", "err", err) + return err + } for _, step := range cdStageWorkflowRequest.PostCiSteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save 
its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, bean3.EntityTypePostCD, strconv.Itoa(pipeline.Id), cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) + customTag, err := impl.customTagService.GetCustomTag(bean3.EntityTypePostCD, strconv.Itoa(pipeline.Id)) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) + return err + } + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err } cdStageWorkflowRequest.RegistryDestinationImageMap = registryDestinationImageMap cdStageWorkflowRequest.RegistryCredentialMap = registryCredentialMap + cdStageWorkflowRequest.PluginArtifactStage = repository.POST_CD } } _, err = impl.cdWorkflowService.SubmitWorkflow(cdStageWorkflowRequest) diff --git a/pkg/pipeline/WorkflowUtils.go b/pkg/pipeline/WorkflowUtils.go index b8317a2f98..03374429d9 100644 --- a/pkg/pipeline/WorkflowUtils.go +++ b/pkg/pipeline/WorkflowUtils.go @@ -369,11 +369,11 @@ type WorkflowRequest struct { CiArtifactLastFetch time.Time `json:"ciArtifactLastFetch"` RegistryDestinationImageMap map[string][]string `json:"registryDestinationImageMap"` RegistryCredentialMap map[string]plugin.RegistryCredentials `json:"registryCredentialMap"` - - Type bean.WorkflowPipelineType - Pipeline *pipelineConfig.Pipeline - Env *repository2.Environment - AppLabels map[string]string + PluginArtifactStage string `json:"pluginArtifactStage"` + Type bean.WorkflowPipelineType + Pipeline *pipelineConfig.Pipeline + Env *repository2.Environment + AppLabels map[string]string } 
type CiCdTriggerEvent struct { diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index da818298c5..5620958ec0 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -3,10 +3,12 @@ package pipeline import ( "errors" "fmt" + "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/devtron-labs/devtron/pkg/plugin" "github.com/go-pg/pg" "go.uber.org/zap" + "strconv" "strings" ) @@ -23,7 +25,7 @@ const ( ) type PluginInputVariableParser interface { - ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, entityKey int, entityValue string, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, error) + ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, customTag *repository.CustomTag, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, error) } type PluginInputVariableParserImpl struct { @@ -44,7 +46,7 @@ func NewPluginInputVariableParserImpl( } } -func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, entityKey int, entityValue string, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, error) { +func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, customTag *repository.CustomTag, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, error) { var DestinationInfo, SourceRegistry, SourceImage string for _, ipVariable := range inputVariables { if ipVariable.Name == DESTINATION_INFO { @@ -65,7 +67,7 @@ func 
(impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(input } } } - registryDestinationImageMap, registryCredentialMap, err := impl.getRegistryDetailsAndDestinationImagePathForSkopeo(entityKey, entityValue, SourceImage, SourceRegistry, DestinationInfo) + registryDestinationImageMap, registryCredentialMap, err := impl.getRegistryDetailsAndDestinationImagePathForSkopeo(customTag, SourceImage, SourceRegistry, DestinationInfo) if err != nil { impl.logger.Errorw("Error in parsing skopeo input variables") return nil, nil, err @@ -73,7 +75,7 @@ func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(input return registryDestinationImageMap, registryCredentialMap, nil } -func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImagePathForSkopeo(entityKey int, entityValue, sourceImage string, sourceRegistry string, destinationInfo string) (registryDestinationImageMap map[string][]string, registryCredentialsMap map[string]plugin.RegistryCredentials, err error) { +func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImagePathForSkopeo(tag *repository.CustomTag, sourceImage string, sourceRegistry string, destinationInfo string) (registryDestinationImageMap map[string][]string, registryCredentialsMap map[string]plugin.RegistryCredentials, err error) { registryDestinationImageMap = make(map[string][]string) registryCredentialsMap = make(map[string]plugin.RegistryCredentials) @@ -115,24 +117,24 @@ func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImage for _, repositoryName := range repositoryValuesSplit { repositoryName = strings.Trim(repositoryName, " ") - customTag, err := impl.customTagService.GetCustomTagByEntityKeyAndValue(entityKey, entityValue) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in fetching custom tag by entity key and entity value ", "entityKey", entityKey, "entityValue", entityValue) - return registryDestinationImageMap, 
registryCredentialsMap, err - } var destinationImage string - if customTag != nil && customTag.Id == 0 { - destinationImage = fmt.Sprintf("%s/%s:%s", registryCredentials.RegistryURL, repositoryName, sourceImageTag) + var tagId int + if tag != nil && tag.Id > 0 { + tagId = tag.Id + } + if tagId > 0 { + destinationImage = fmt.Sprintf("%s/%s:%s", registryCredentials.RegistryURL, repositoryName, strconv.Itoa(tag.Id)) } else { - imagePathReservation, err := impl.customTagService.GenerateImagePath(entityKey, entityValue, registryCredentials.RegistryURL, repositoryName) - if err != nil { - impl.logger.Errorw("error in reserving image path", "err", err) - return registryDestinationImageMap, registryCredentialsMap, err - } - destinationImage = imagePathReservation.ImagePath + destinationImage = fmt.Sprintf("%s/%s:%s", registryCredentials.RegistryURL, repositoryName, sourceImageTag) } destinationImages = append(destinationImages, destinationImage) + err = impl.customTagService.ReserveImagePath(destinationImage, tagId) + if err != nil { + impl.logger.Errorw("Error in marking custom tag reserved", "err", err) + return registryDestinationImageMap, registryCredentialsMap, err + } } + registryDestinationImageMap[registryName] = destinationImages registryCredentialsMap[registryName] = plugin.RegistryCredentials{ RegistryType: string(registryCredentials.RegistryType), diff --git a/wire_gen.go b/wire_gen.go index b5016612da..d7a2246978 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -451,7 +451,7 @@ func InitializeApp() (*App, error) { imageTagRepositoryImpl := repository.NewImageTagRepository(db, sugaredLogger) customTagServiceImpl := pipeline.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) pluginInputVariableParserImpl := pipeline.NewPluginInputVariableParserImpl(sugaredLogger, dockerRegistryConfigImpl, customTagServiceImpl) - workflowDagExecutorImpl := pipeline.NewWorkflowDagExecutorImpl(sugaredLogger, pipelineRepositoryImpl, cdWorkflowRepositoryImpl, 
pubSubClientServiceImpl, appServiceImpl, workflowServiceImpl, ciArtifactRepositoryImpl, ciPipelineRepositoryImpl, materialRepositoryImpl, pipelineOverrideRepositoryImpl, userServiceImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, enforcerImpl, enforcerUtilImpl, tokenCache, acdAuthConfig, eventSimpleFactoryImpl, eventRESTClientImpl, cvePolicyRepositoryImpl, imageScanResultRepositoryImpl, appWorkflowRepositoryImpl, prePostCdScriptHistoryServiceImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, pipelineStatusTimelineServiceImpl, ciTemplateRepositoryImpl, ciWorkflowRepositoryImpl, appLabelRepositoryImpl, clientImpl, pipelineStageServiceImpl, k8sCommonServiceImpl, variableSnapshotHistoryServiceImpl, globalPluginServiceImpl, pluginInputVariableParserImpl, deploymentTemplateHistoryServiceImpl, configMapHistoryServiceImpl, pipelineStrategyHistoryServiceImpl, manifestPushConfigRepositoryImpl, gitOpsManifestPushServiceImpl, ciPipelineMaterialRepositoryImpl, imageScanHistoryRepositoryImpl, imageScanDeployInfoRepositoryImpl, appCrudOperationServiceImpl, pipelineConfigRepositoryImpl, dockerRegistryIpsConfigServiceImpl, chartRepositoryImpl, chartTemplateServiceImpl, pipelineStrategyHistoryRepositoryImpl, appRepositoryImpl, deploymentTemplateHistoryRepositoryImpl, argoK8sClientImpl, configMapRepositoryImpl, configMapHistoryRepositoryImpl, refChartDir, helmAppServiceImpl, helmAppClientImpl, chartRefRepositoryImpl, envConfigOverrideRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, dbMigrationConfigRepositoryImpl, mergeUtil, gitOpsConfigRepositoryImpl, gitFactory, applicationServiceClientImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, argoClientWrapperServiceImpl, scopedVariableServiceImpl) + workflowDagExecutorImpl := pipeline.NewWorkflowDagExecutorImpl(sugaredLogger, pipelineRepositoryImpl, cdWorkflowRepositoryImpl, pubSubClientServiceImpl, appServiceImpl, workflowServiceImpl, ciArtifactRepositoryImpl, 
ciPipelineRepositoryImpl, materialRepositoryImpl, pipelineOverrideRepositoryImpl, userServiceImpl, deploymentGroupRepositoryImpl, environmentRepositoryImpl, enforcerImpl, enforcerUtilImpl, tokenCache, acdAuthConfig, eventSimpleFactoryImpl, eventRESTClientImpl, cvePolicyRepositoryImpl, imageScanResultRepositoryImpl, appWorkflowRepositoryImpl, prePostCdScriptHistoryServiceImpl, argoUserServiceImpl, pipelineStatusTimelineRepositoryImpl, pipelineStatusTimelineServiceImpl, ciTemplateRepositoryImpl, ciWorkflowRepositoryImpl, appLabelRepositoryImpl, clientImpl, pipelineStageServiceImpl, k8sCommonServiceImpl, variableSnapshotHistoryServiceImpl, globalPluginServiceImpl, pluginInputVariableParserImpl, deploymentTemplateHistoryServiceImpl, configMapHistoryServiceImpl, pipelineStrategyHistoryServiceImpl, manifestPushConfigRepositoryImpl, gitOpsManifestPushServiceImpl, ciPipelineMaterialRepositoryImpl, imageScanHistoryRepositoryImpl, imageScanDeployInfoRepositoryImpl, appCrudOperationServiceImpl, pipelineConfigRepositoryImpl, dockerRegistryIpsConfigServiceImpl, chartRepositoryImpl, chartTemplateServiceImpl, pipelineStrategyHistoryRepositoryImpl, appRepositoryImpl, deploymentTemplateHistoryRepositoryImpl, argoK8sClientImpl, configMapRepositoryImpl, configMapHistoryRepositoryImpl, refChartDir, helmAppServiceImpl, helmAppClientImpl, chartRefRepositoryImpl, envConfigOverrideRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, dbMigrationConfigRepositoryImpl, mergeUtil, gitOpsConfigRepositoryImpl, gitFactory, applicationServiceClientImpl, variableEntityMappingServiceImpl, variableTemplateParserImpl, argoClientWrapperServiceImpl, scopedVariableServiceImpl, customTagServiceImpl) deploymentGroupAppRepositoryImpl := repository.NewDeploymentGroupAppRepositoryImpl(sugaredLogger, db) deploymentGroupServiceImpl := deploymentGroup.NewDeploymentGroupServiceImpl(appRepositoryImpl, sugaredLogger, pipelineRepositoryImpl, ciPipelineRepositoryImpl, 
deploymentGroupRepositoryImpl, environmentRepositoryImpl, deploymentGroupAppRepositoryImpl, ciArtifactRepositoryImpl, appWorkflowRepositoryImpl, workflowDagExecutorImpl) deploymentConfigServiceImpl := pipeline.NewDeploymentConfigServiceImpl(sugaredLogger, envConfigOverrideRepositoryImpl, chartRepositoryImpl, pipelineRepositoryImpl, envLevelAppMetricsRepositoryImpl, appLevelMetricsRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, configMapHistoryServiceImpl, chartRefRepositoryImpl, variableEntityMappingServiceImpl, scopedVariableServiceImpl, variableTemplateParserImpl) @@ -498,7 +498,7 @@ func InitializeApp() (*App, error) { devtronAppConfigServiceImpl := pipeline.NewDevtronAppConfigServiceImpl(sugaredLogger, ciCdPipelineOrchestratorImpl, appRepositoryImpl, pipelineRepositoryImpl, resourceGroupServiceImpl, enforcerUtilImpl, ciMaterialConfigServiceImpl) pipelineBuilderImpl := pipeline.NewPipelineBuilderImpl(sugaredLogger, materialRepositoryImpl, chartRepositoryImpl, ciPipelineConfigServiceImpl, ciMaterialConfigServiceImpl, appArtifactManagerImpl, devtronAppCMCSServiceImpl, devtronAppStrategyServiceImpl, appDeploymentTypeChangeManagerImpl, cdPipelineConfigServiceImpl, devtronAppConfigServiceImpl) dbMigrationServiceImpl := pipeline.NewDbMogrationService(sugaredLogger, dbMigrationConfigRepositoryImpl) - ciServiceImpl := pipeline.NewCiServiceImpl(sugaredLogger, workflowServiceImpl, ciPipelineMaterialRepositoryImpl, ciWorkflowRepositoryImpl, eventRESTClientImpl, eventSimpleFactoryImpl, mergeUtil, ciPipelineRepositoryImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, userServiceImpl, ciTemplateServiceImpl, appCrudOperationServiceImpl, environmentRepositoryImpl, appRepositoryImpl, variableSnapshotHistoryServiceImpl, customTagServiceImpl) + ciServiceImpl := pipeline.NewCiServiceImpl(sugaredLogger, workflowServiceImpl, ciPipelineMaterialRepositoryImpl, ciWorkflowRepositoryImpl, eventRESTClientImpl, eventSimpleFactoryImpl, mergeUtil, 
ciPipelineRepositoryImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, userServiceImpl, ciTemplateServiceImpl, appCrudOperationServiceImpl, environmentRepositoryImpl, appRepositoryImpl, variableSnapshotHistoryServiceImpl, customTagServiceImpl, pluginInputVariableParserImpl, globalPluginServiceImpl) ciLogServiceImpl, err := pipeline.NewCiLogServiceImpl(sugaredLogger, ciServiceImpl, k8sUtil) if err != nil { return nil, err From feac3eabf63f7757d6e8e1f30432d57a934e8ddc Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 2 Nov 2023 18:07:09 +0530 Subject: [PATCH 095/143] fix list api --- internal/sql/repository/CiArtifactsListingQueryBuilder.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go index 28147dd5a9..e1d77589f6 100644 --- a/internal/sql/repository/CiArtifactsListingQueryBuilder.go +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -61,11 +61,11 @@ func BuildQueryForArtifactsForCdStage(listingFilterOptions bean.ArtifactsListFil // " AND cia.image LIKE '%v' " commonQuery = fmt.Sprintf(commonQuery, listingFilterOptions.PipelineId, listingFilterOptions.StageType, listingFilterOptions.ParentId, listingFilterOptions.ParentStageType, listingFilterOptions.ParentId, listingFilterOptions.PluginStage, listingFilterOptions.SearchString) if len(listingFilterOptions.ExcludeArtifactIds) > 0 { - commonQuery = commonQuery + fmt.Sprintf(" AND ( cd_workflow.ci_artifact_id NOT IN (%v))", helper.GetCommaSepratedString(listingFilterOptions.ExcludeArtifactIds)) + commonQuery = commonQuery + fmt.Sprintf(" AND ( ci_artifact.id NOT IN (%v))", helper.GetCommaSepratedString(listingFilterOptions.ExcludeArtifactIds)) } totalCountQuery := "SELECT COUNT(DISTINCT ci_artifact.id) as total_count " + commonQuery - selectQuery := fmt.Sprintf("SELECT ci_artifact.id , (%v) ", totalCountQuery) + selectQuery := 
fmt.Sprintf("SELECT DISTINCT(ci_artifact.id) , (%v) ", totalCountQuery) //GroupByQuery := " GROUP BY cia.id " limitOffSetQuery := fmt.Sprintf(" LIMIT %v OFFSET %v", listingFilterOptions.Limit, listingFilterOptions.Offset) From 469d1a317e0d342be9961387c27a46d47dba9340 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Fri, 3 Nov 2023 11:39:40 +0530 Subject: [PATCH 096/143] totalcount fix --- pkg/pipeline/AppArtifactManager.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 5778a9e0f3..50cf79a56d 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -733,6 +733,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A searchString := listingFilterOpts.SearchString[1 : len(listingFilterOpts.SearchString)-1] if strings.Contains(currentRunningArtifactBean.Image, searchString) { ciArtifacts = append(ciArtifacts, currentRunningArtifactBean) + totalCount += 1 } } From 84b39118500fd6960eb916c3663d8fb259afad69 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Fri, 3 Nov 2023 11:45:44 +0530 Subject: [PATCH 097/143] offset validation --- api/restHandler/app/DeploymentPipelineRestHandler.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/api/restHandler/app/DeploymentPipelineRestHandler.go b/api/restHandler/app/DeploymentPipelineRestHandler.go index ce2c6a2357..0c8e2c7060 100644 --- a/api/restHandler/app/DeploymentPipelineRestHandler.go +++ b/api/restHandler/app/DeploymentPipelineRestHandler.go @@ -1189,16 +1189,20 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsByCDPipeline(w http.Res offsetQueryParam := r.URL.Query().Get("offset") if offsetQueryParam != "" { offset, err = strconv.Atoi(offsetQueryParam) - handler.Logger.Errorw("request err, GetArtifactsForRollback", "err", err, "offsetQueryParam", offsetQueryParam) - common.WriteJsonResp(w, err, "invalid offset", http.StatusBadRequest) + if 
err != nil || offset < 1 { + handler.Logger.Errorw("request err, GetArtifactsForRollback", "err", err, "offsetQueryParam", offsetQueryParam) + common.WriteJsonResp(w, err, "invalid offset", http.StatusBadRequest) + } return } sizeQueryParam := r.URL.Query().Get("size") if sizeQueryParam != "" { limit, err = strconv.Atoi(sizeQueryParam) - handler.Logger.Errorw("request err, GetArtifactsForRollback", "err", err, "sizeQueryParam", sizeQueryParam) - common.WriteJsonResp(w, err, "invalid size", http.StatusBadRequest) + if err != nil { + handler.Logger.Errorw("request err, GetArtifactsForRollback", "err", err, "sizeQueryParam", sizeQueryParam) + common.WriteJsonResp(w, err, "invalid size", http.StatusBadRequest) + } return } handler.Logger.Infow("request payload, GetArtifactsByCDPipeline", "cdPipelineId", cdPipelineId, "stage", stage) From 1ff96d7c6fef8c85ed0ab0a40a7f9a1b051a3ae7 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 3 Nov 2023 12:16:38 +0530 Subject: [PATCH 098/143] fixes post ci --- .../sql/repository/CustomTagRepository.go | 10 ++++ pkg/pipeline/CiService.go | 14 ++++-- pkg/pipeline/CustomTagService.go | 47 ++++++++++--------- pkg/pipeline/WebhookService.go | 10 ++++ pkg/pipeline/WorkflowDagExecutor.go | 16 +++++-- pkg/pipeline/WorkflowUtils.go | 1 + pkg/pipeline/pipelineStageVariableParser.go | 30 ++++-------- 7 files changed, 77 insertions(+), 51 deletions(-) diff --git a/internal/sql/repository/CustomTagRepository.go b/internal/sql/repository/CustomTagRepository.go index 132046c885..92c77cf45c 100644 --- a/internal/sql/repository/CustomTagRepository.go +++ b/internal/sql/repository/CustomTagRepository.go @@ -36,6 +36,7 @@ type ImageTagRepository interface { DeleteByEntityKeyAndValue(entityKey int, entityValue string) error DeactivateImagePathReservation(id int) error FetchActiveCustomTagData(entityKey int, entityValue string) (*CustomTag, error) + DeactivateImagePathReservationByImagePaths(tx *pg.Tx, imagePaths []string) error } type 
ImageTagRepositoryImpl struct { @@ -106,3 +107,12 @@ func (impl *ImageTagRepositoryImpl) FindByImagePath(tx *pg.Tx, path string) ([]* func (impl *ImageTagRepositoryImpl) InsertImagePath(tx *pg.Tx, reservation *ImagePathReservation) error { return tx.Insert(reservation) } + +func (impl *ImageTagRepositoryImpl) DeactivateImagePathReservationByImagePaths(tx *pg.Tx, imagePaths []string) error { + query := `UPDATE image_path_reservation set active=false where image_path in (?)` + _, err := tx.Exec(query, pg.In(imagePaths)) + if err != nil && err != pg.ErrNoRows { + return err + } + return nil +} diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index c4aaf1cb29..a2d203cfa1 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -487,7 +487,9 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. } else { dockerImageTag = impl.buildImageTag(commitHashes, pipeline.Id, savedWf.Id) } - registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, err := impl.GetEnvVariablesForSkopeoPlugin(preCiSteps, postCiSteps, customTag, fmt.Sprintf(bean2.ImagePathPattern, pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository, dockerImageTag), pipeline.CiTemplate.DockerRegistry.Id) + registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, err := impl.GetEnvVariablesForSkopeoPlugin( + preCiSteps, postCiSteps, dockerImageTag, customTag.Id, + fmt.Sprintf(bean2.ImagePathPattern, pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository, dockerImageTag), pipeline.CiTemplate.DockerRegistry.Id) if err != nil { impl.Logger.Errorw("error in getting env variables for skopeo plugin") return nil, err @@ -661,7 +663,9 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
if ciWorkflowConfig.LogsBucket == "" { ciWorkflowConfig.LogsBucket = impl.config.GetDefaultBuildLogsBucket() } - + if len(registryDestinationImageMap) > 0 { + workflowRequest.PushImageBeforePostCI = true + } switch workflowRequest.CloudProvider { case BLOB_STORAGE_S3: //No AccessKey is used for uploading artifacts, instead IAM based auth is used @@ -719,7 +723,7 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. return workflowRequest, nil } -func (impl *CiServiceImpl) GetEnvVariablesForSkopeoPlugin(preCiSteps []*bean2.StepObject, postCiSteps []*bean2.StepObject, customTag *repository5.CustomTag, buildImagePath string, buildImagedockerRegistryId string) (map[string][]string, map[string]plugin.RegistryCredentials, string, error) { +func (impl *CiServiceImpl) GetEnvVariablesForSkopeoPlugin(preCiSteps []*bean2.StepObject, postCiSteps []*bean2.StepObject, customTag string, customTagId int, buildImagePath string, buildImagedockerRegistryId string) (map[string][]string, map[string]plugin.RegistryCredentials, string, error) { var registryDestinationImageMap map[string][]string var registryCredentialMap map[string]plugin.RegistryCredentials var pluginArtifactStage string @@ -731,7 +735,7 @@ func (impl *CiServiceImpl) GetEnvVariablesForSkopeoPlugin(preCiSteps []*bean2.St for _, step := range preCiSteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, buildImagePath, buildImagedockerRegistryId) + registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, customTagId, buildImagePath, buildImagedockerRegistryId) if err != nil { impl.Logger.Errorw("error in parsing skopeo input variable", "err", 
err) return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, err @@ -742,7 +746,7 @@ func (impl *CiServiceImpl) GetEnvVariablesForSkopeoPlugin(preCiSteps []*bean2.St for _, step := range postCiSteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, buildImagePath, buildImagedockerRegistryId) + registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, customTagId, buildImagePath, buildImagedockerRegistryId) if err != nil { impl.Logger.Errorw("error in parsing skopeo input variable", "err", err) return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, err diff --git a/pkg/pipeline/CustomTagService.go b/pkg/pipeline/CustomTagService.go index 2bcf48def5..02d08b61e4 100644 --- a/pkg/pipeline/CustomTagService.go +++ b/pkg/pipeline/CustomTagService.go @@ -19,8 +19,9 @@ type CustomTagService interface { GenerateImagePath(entityKey int, entityValue string, dockerRegistryURL string, dockerRepo string) (*repository.ImagePathReservation, error) DeleteCustomTagIfExists(tag bean.CustomTag) error DeactivateImagePathReservation(id int) error - GetCustomTag(entityKey int, entityValue string) (*repository.CustomTag, error) + GetCustomTag(entityKey int, entityValue string) (*repository.CustomTag, string, error) ReserveImagePath(imagePath string, customTagId int) error + DeactivateImagePathReservationByImagePath(imagePaths []string) error } type CustomTagServiceImpl struct { @@ -103,7 +104,6 @@ func (impl *CustomTagServiceImpl) GenerateImagePath(entityKey int, entityValue s tag, err := validateAndConstructTag(customTagData) if err != nil { return nil, err - return nil, err } imagePath := 
fmt.Sprintf(bean2.ImagePathPattern, dockerRegistryURL, dockerRepo, tag) imagePathReservations, err := impl.customTagRepository.FindByImagePath(tx, imagePath) @@ -176,22 +176,24 @@ func isValidDockerImageTag(tag string) bool { return re.MatchString(tag) } -func (impl *CustomTagServiceImpl) GetCustomTag(entityKey int, entityValue string) (*repository.CustomTag, error) { +func (impl *CustomTagServiceImpl) GetCustomTag(entityKey int, entityValue string) (*repository.CustomTag, string, error) { connection := impl.customTagRepository.GetConnection() tx, err := connection.Begin() - customTag, err := impl.customTagRepository.IncrementAndFetchByEntityKeyAndValue(tx, entityKey, entityValue) - if err != nil { - return nil, err - } + customTagData, err := impl.customTagRepository.IncrementAndFetchByEntityKeyAndValue(tx, entityKey, entityValue) if err != nil { - return nil, err + return nil, "", err } err = tx.Commit() if err != nil { impl.Logger.Errorw("Error in fetching custom tag", "err", err) - return customTag, err + return customTagData, "", err } - return customTag, nil + + tag, err := validateAndConstructTag(customTagData) + if err != nil { + return nil, "", err + } + return customTagData, tag, nil } @@ -219,23 +221,26 @@ func (impl *CustomTagServiceImpl) ReserveImagePath(imagePath string, customTagId err = tx.Commit() if err != nil { impl.Logger.Errorw("Error in fetching custom tag", "err", err) - return nil + return err } return err } -func validateTag(imageTag string) error { - if len(imageTag) == 0 || len(imageTag) > 128 { - return fmt.Errorf("image tag should be of len 1-128 only, imageTag: %s", imageTag) +func (impl *CustomTagServiceImpl) DeactivateImagePathReservationByImagePath(imagePaths []string) error { + connection := impl.customTagRepository.GetConnection() + tx, err := connection.Begin() + if err != nil { + return nil } - allowedSymbols := ".abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ-0987654321" - allowedCharSet := make(map[int32]struct{}) - for 
_, c := range allowedSymbols { - allowedCharSet[c] = struct{}{} + err = impl.customTagRepository.DeactivateImagePathReservationByImagePaths(tx, imagePaths) + if err != nil { + impl.Logger.Errorw("error in marking image path unreserved") + return err } - firstChar := imageTag[0:1] - if firstChar == "." || firstChar == "-" { - return fmt.Errorf("image tag can not start with a period or a hyphen, imageTag: %s", imageTag) + err = tx.Commit() + if err != nil { + impl.Logger.Errorw("Error in fetching custom tag", "err", err) + return err } return nil } diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index 8e6a356869..9cacd9cb93 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -147,10 +147,20 @@ func (impl WebhookServiceImpl) HandleCiStepFailedEvent(ciPipelineId int, request } go func() { + //TODO: Ayush - remove plugin images from reservation table err := impl.customTagService.DeactivateImagePathReservation(savedWorkflow.ImagePathReservationId) if err != nil { impl.logger.Errorw("unable to deactivate impage_path_reservation ", err) } + for _, images := range request.PluginRegistryArtifactDetails { + if len(images) > 0 { + err = impl.customTagService.DeactivateImagePathReservationByImagePath(images) + if err != nil { + impl.logger.Errorw("unable to deactivate impage_path_reservation ", err) + } + } + } + }() go impl.WriteCIStepFailedEvent(pipeline, request, savedWorkflow) diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 510a5d2516..8f205e1086 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -768,12 +768,16 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * for _, step := range cdStageWorkflowRequest.PrePostDeploySteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - 
customTag, err := impl.customTagService.GetCustomTag(bean3.EntityTypePreCD, strconv.Itoa(pipeline.Id)) + customTag, dockerImageTag, err := impl.customTagService.GetCustomTag(bean3.EntityTypePreCD, strconv.Itoa(pipeline.Id)) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) return err } - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) + var customTagId int + if customTag == nil { + customTagId = customTag.Id + } + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, dockerImageTag, customTagId, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err @@ -911,12 +915,16 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor for _, step := range cdStageWorkflowRequest.PostCiSteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - customTag, err := impl.customTagService.GetCustomTag(bean3.EntityTypePostCD, strconv.Itoa(pipeline.Id)) + customTag, dockerImageTag, err := impl.customTagService.GetCustomTag(bean3.EntityTypePostCD, strconv.Itoa(pipeline.Id)) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) return err } - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) + var customTagId int + 
if customTag == nil { + customTagId = customTag.Id + } + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, dockerImageTag, customTagId, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err diff --git a/pkg/pipeline/WorkflowUtils.go b/pkg/pipeline/WorkflowUtils.go index 03374429d9..5f682bc515 100644 --- a/pkg/pipeline/WorkflowUtils.go +++ b/pkg/pipeline/WorkflowUtils.go @@ -370,6 +370,7 @@ type WorkflowRequest struct { RegistryDestinationImageMap map[string][]string `json:"registryDestinationImageMap"` RegistryCredentialMap map[string]plugin.RegistryCredentials `json:"registryCredentialMap"` PluginArtifactStage string `json:"pluginArtifactStage"` + PushImageBeforePostCI bool `json:"pushImageBeforePostCI"` Type bean.WorkflowPipelineType Pipeline *pipelineConfig.Pipeline Env *repository2.Environment diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index 5620958ec0..fdedb79ec7 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -3,12 +3,10 @@ package pipeline import ( "errors" "fmt" - "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/devtron-labs/devtron/pkg/plugin" "github.com/go-pg/pg" "go.uber.org/zap" - "strconv" "strings" ) @@ -25,7 +23,7 @@ const ( ) type PluginInputVariableParser interface { - ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, customTag *repository.CustomTag, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, error) + ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, 
customTag string, customTagId int, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, error) } type PluginInputVariableParserImpl struct { @@ -46,7 +44,7 @@ func NewPluginInputVariableParserImpl( } } -func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, customTag *repository.CustomTag, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, error) { +func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, dockerImageTag string, customTagId int, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, error) { var DestinationInfo, SourceRegistry, SourceImage string for _, ipVariable := range inputVariables { if ipVariable.Name == DESTINATION_INFO { @@ -67,7 +65,7 @@ func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(input } } } - registryDestinationImageMap, registryCredentialMap, err := impl.getRegistryDetailsAndDestinationImagePathForSkopeo(customTag, SourceImage, SourceRegistry, DestinationInfo) + registryDestinationImageMap, registryCredentialMap, err := impl.getRegistryDetailsAndDestinationImagePathForSkopeo(dockerImageTag, customTagId, SourceImage, SourceRegistry, DestinationInfo) if err != nil { impl.logger.Errorw("Error in parsing skopeo input variables") return nil, nil, err @@ -75,14 +73,14 @@ func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(input return registryDestinationImageMap, registryCredentialMap, nil } -func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImagePathForSkopeo(tag *repository.CustomTag, sourceImage string, sourceRegistry string, destinationInfo string) (registryDestinationImageMap map[string][]string, registryCredentialsMap 
map[string]plugin.RegistryCredentials, err error) { +func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImagePathForSkopeo(dockerImageTag string, tagId int, sourceImage string, sourceRegistry string, destinationInfo string) (registryDestinationImageMap map[string][]string, registryCredentialsMap map[string]plugin.RegistryCredentials, err error) { registryDestinationImageMap = make(map[string][]string) registryCredentialsMap = make(map[string]plugin.RegistryCredentials) - var sourceImageTag string - sourceSplit := strings.Split(sourceImage, ":") - sourceImageTag = sourceSplit[len(sourceSplit)-1] - + if len(dockerImageTag) == 0 { + sourceSplit := strings.Split(sourceImage, ":") + dockerImageTag = sourceSplit[len(sourceSplit)-1] + } //saving source registry credentials registryCredentials, err := impl.dockerRegistryConfig.FetchOneDockerAccount(sourceRegistry) if err != nil { @@ -117,16 +115,7 @@ func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImage for _, repositoryName := range repositoryValuesSplit { repositoryName = strings.Trim(repositoryName, " ") - var destinationImage string - var tagId int - if tag != nil && tag.Id > 0 { - tagId = tag.Id - } - if tagId > 0 { - destinationImage = fmt.Sprintf("%s/%s:%s", registryCredentials.RegistryURL, repositoryName, strconv.Itoa(tag.Id)) - } else { - destinationImage = fmt.Sprintf("%s/%s:%s", registryCredentials.RegistryURL, repositoryName, sourceImageTag) - } + destinationImage := fmt.Sprintf("%s/%s:%s", registryCredentials.RegistryURL, repositoryName, dockerImageTag) destinationImages = append(destinationImages, destinationImage) err = impl.customTagService.ReserveImagePath(destinationImage, tagId) if err != nil { @@ -134,7 +123,6 @@ func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImage return registryDestinationImageMap, registryCredentialsMap, err } } - registryDestinationImageMap[registryName] = destinationImages 
registryCredentialsMap[registryName] = plugin.RegistryCredentials{ RegistryType: string(registryCredentials.RegistryType), From d45c1da111e17b052f493b3903d425e18c6f1557 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 3 Nov 2023 12:19:08 +0530 Subject: [PATCH 099/143] fix custom tag id --- pkg/pipeline/WorkflowDagExecutor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 8f205e1086..ac21e71635 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -774,7 +774,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * return err } var customTagId int - if customTag == nil { + if customTag != nil { customTagId = customTag.Id } registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, dockerImageTag, customTagId, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) From ffe740f406f08c3194ec9456175da1761489acac Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 3 Nov 2023 13:45:35 +0530 Subject: [PATCH 100/143] custom tag fix and ci artifact api fix --- internal/sql/repository/CiArtifactsListingQueryBuilder.go | 2 +- pkg/pipeline/DeploymentPipelineConfigService.go | 5 ++++- ...artifact_refactor.down.sql => 185_skopeo_plugin.down.sql} | 0 .../{182_skopeo_plugin.up.sql => 185_skopeo_plugin.up.sql} | 0 ...peo_plugin.down.sql => 186_ci_artifact_refactor.down.sql} | 0 ...ifact_refactor.up.sql => 186_ci_artifact_refactor.up.sql} | 2 ++ 6 files changed, 7 insertions(+), 2 deletions(-) rename scripts/sql/{181_ci_artifact_refactor.down.sql => 185_skopeo_plugin.down.sql} (100%) rename scripts/sql/{182_skopeo_plugin.up.sql => 185_skopeo_plugin.up.sql} (100%) rename scripts/sql/{182_skopeo_plugin.down.sql => 186_ci_artifact_refactor.down.sql} (100%) rename 
scripts/sql/{181_ci_artifact_refactor.up.sql => 186_ci_artifact_refactor.up.sql} (84%) diff --git a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go index e1d77589f6..99345c1cb3 100644 --- a/internal/sql/repository/CiArtifactsListingQueryBuilder.go +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -15,7 +15,7 @@ func BuildQueryForParentTypeCIOrWebhook(listingFilterOpts bean.ArtifactsListFilt if listingFilterOpts.ParentStageType == bean.CI_WORKFLOW_TYPE { selectQuery := " SELECT cia.* " remainingQuery := " FROM ci_artifact cia" + - " INNER JOIN ci_pipeline cp ON (cp.id=cia.pipeline_id or (cp.id=cia.pipeline_id and cia.data_source='post_ci' ) )" + + " INNER JOIN ci_pipeline cp ON (cp.id=cia.pipeline_id or (cp.id=cia.component_id and cia.data_source='post_ci' ) )" + " INNER JOIN pipeline p ON p.ci_pipeline_id = cp.id and p.id=%v" + " WHERE " remainingQuery = fmt.Sprintf(remainingQuery, listingFilterOpts.PipelineId) diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index 884dce3d64..c317e3fdbe 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -430,7 +430,10 @@ func (impl *CdPipelineConfigServiceImpl) CreateCdPipelines(pipelineCreateRequest } func (impl *CdPipelineConfigServiceImpl) CDPipelineCustomTagDBOperations(pipeline *bean.CDPipelineConfigObject) error { - if pipeline.EnableCustomTag && pipeline.CustomTagObject == nil { + if !pipeline.EnableCustomTag { + return nil + } + if pipeline.EnableCustomTag && len(pipeline.CustomTagObject.TagPattern) == 0 { return fmt.Errorf("please provide custom tag data if tag is enabled") } if pipeline.CustomTagObject == nil && pipeline.CustomTagStage == nil { diff --git a/scripts/sql/181_ci_artifact_refactor.down.sql b/scripts/sql/185_skopeo_plugin.down.sql similarity index 100% rename from 
scripts/sql/181_ci_artifact_refactor.down.sql rename to scripts/sql/185_skopeo_plugin.down.sql diff --git a/scripts/sql/182_skopeo_plugin.up.sql b/scripts/sql/185_skopeo_plugin.up.sql similarity index 100% rename from scripts/sql/182_skopeo_plugin.up.sql rename to scripts/sql/185_skopeo_plugin.up.sql diff --git a/scripts/sql/182_skopeo_plugin.down.sql b/scripts/sql/186_ci_artifact_refactor.down.sql similarity index 100% rename from scripts/sql/182_skopeo_plugin.down.sql rename to scripts/sql/186_ci_artifact_refactor.down.sql diff --git a/scripts/sql/181_ci_artifact_refactor.up.sql b/scripts/sql/186_ci_artifact_refactor.up.sql similarity index 84% rename from scripts/sql/181_ci_artifact_refactor.up.sql rename to scripts/sql/186_ci_artifact_refactor.up.sql index f176377712..50131d6d8e 100644 --- a/scripts/sql/181_ci_artifact_refactor.up.sql +++ b/scripts/sql/186_ci_artifact_refactor.up.sql @@ -1,3 +1,5 @@ ALTER TABLE ci_artifact ADD COLUMN credentials_source_type VARCHAR(50); ALTER TABLE ci_artifact ADD COLUMN credentials_source_value VARCHAR(50); ALTER TABLE ci_artifact ADD COLUMN component_id integer; + +ALTER TABLE ci_artifact ADD COLUMN From e4619ea7e21a04ce2d615f00a4af1121a06c41d9 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 3 Nov 2023 15:54:39 +0530 Subject: [PATCH 101/143] adding image_path_reservation_ids --- .../pipelineConfig/CiWorkflowRepository.go | 96 ++++++++++--------- pkg/pipeline/WebhookService.go | 2 +- pkg/pipeline/WorkflowDagExecutor.go | 1 + scripts/sql/186_ci_artifact_refactor.up.sql | 4 +- 4 files changed, 54 insertions(+), 49 deletions(-) diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index 4c33fe5025..776640ac01 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -56,56 +56,58 @@ type CiWorkflowRepositoryImpl struct { } type CiWorkflow struct 
{ - tableName struct{} `sql:"ci_workflow" pg:",discard_unknown_columns"` - Id int `sql:"id,pk"` - Name string `sql:"name"` - Status string `sql:"status"` - PodStatus string `sql:"pod_status"` - Message string `sql:"message"` - StartedOn time.Time `sql:"started_on"` - FinishedOn time.Time `sql:"finished_on"` - CiPipelineId int `sql:"ci_pipeline_id"` - Namespace string `sql:"namespace"` - BlobStorageEnabled bool `sql:"blob_storage_enabled,notnull"` - LogLocation string `sql:"log_file_path"` - GitTriggers map[int]GitCommit `sql:"git_triggers"` - TriggeredBy int32 `sql:"triggered_by"` - CiArtifactLocation string `sql:"ci_artifact_location"` - PodName string `sql:"pod_name"` - CiBuildType string `sql:"ci_build_type"` - EnvironmentId int `sql:"environment_id"` - ImagePathReservationId int `sql:"image_path_reservation_id"` - ReferenceCiWorkflowId int `sql:"ref_ci_workflow_id"` - ParentCiWorkFlowId int `sql:"parent_ci_workflow_id"` - CiPipeline *CiPipeline + tableName struct{} `sql:"ci_workflow" pg:",discard_unknown_columns"` + Id int `sql:"id,pk"` + Name string `sql:"name"` + Status string `sql:"status"` + PodStatus string `sql:"pod_status"` + Message string `sql:"message"` + StartedOn time.Time `sql:"started_on"` + FinishedOn time.Time `sql:"finished_on"` + CiPipelineId int `sql:"ci_pipeline_id"` + Namespace string `sql:"namespace"` + BlobStorageEnabled bool `sql:"blob_storage_enabled,notnull"` + LogLocation string `sql:"log_file_path"` + GitTriggers map[int]GitCommit `sql:"git_triggers"` + TriggeredBy int32 `sql:"triggered_by"` + CiArtifactLocation string `sql:"ci_artifact_location"` + PodName string `sql:"pod_name"` + CiBuildType string `sql:"ci_build_type"` + EnvironmentId int `sql:"environment_id"` + ImagePathReservationId int `sql:"image_path_reservation_id"` + ReferenceCiWorkflowId int `sql:"ref_ci_workflow_id"` + ParentCiWorkFlowId int `sql:"parent_ci_workflow_id"` + ImagePathReservationIds []int `json:"image_path_reservation_ids"` + CiPipeline *CiPipeline } type 
WorkflowWithArtifact struct { - Id int `json:"id"` - Name string `json:"name"` - PodName string `json:"podName"` - Status string `json:"status"` - PodStatus string `json:"pod_status"` - Message string `json:"message"` - StartedOn time.Time `json:"started_on"` - FinishedOn time.Time `json:"finished_on"` - CiPipelineId int `json:"ci_pipeline_id"` - Namespace string `json:"namespace"` - LogFilePath string `json:"log_file_path"` - GitTriggers map[int]GitCommit `json:"git_triggers"` - TriggeredBy int32 `json:"triggered_by"` - EmailId string `json:"email_id"` - Image string `json:"image"` - CiArtifactLocation string `json:"ci_artifact_location"` - CiArtifactId int `json:"ci_artifact_d"` - BlobStorageEnabled bool `json:"blobStorageEnabled"` - CiBuildType string `json:"ci_build_type"` - IsArtifactUploaded bool `json:"is_artifact_uploaded"` - EnvironmentId int `json:"environmentId"` - EnvironmentName string `json:"environmentName"` - ImagePathReservationId int `json:"image_path_reservation_id"` - RefCiWorkflowId int `json:"referenceCiWorkflowId"` - ParentCiWorkflowId int `json:"parent_ci_workflow_id"` + Id int `json:"id"` + Name string `json:"name"` + PodName string `json:"podName"` + Status string `json:"status"` + PodStatus string `json:"pod_status"` + Message string `json:"message"` + StartedOn time.Time `json:"started_on"` + FinishedOn time.Time `json:"finished_on"` + CiPipelineId int `json:"ci_pipeline_id"` + Namespace string `json:"namespace"` + LogFilePath string `json:"log_file_path"` + GitTriggers map[int]GitCommit `json:"git_triggers"` + TriggeredBy int32 `json:"triggered_by"` + EmailId string `json:"email_id"` + Image string `json:"image"` + CiArtifactLocation string `json:"ci_artifact_location"` + CiArtifactId int `json:"ci_artifact_d"` + BlobStorageEnabled bool `json:"blobStorageEnabled"` + CiBuildType string `json:"ci_build_type"` + IsArtifactUploaded bool `json:"is_artifact_uploaded"` + EnvironmentId int `json:"environmentId"` + EnvironmentName string 
`json:"environmentName"` + ImagePathReservationId int `json:"image_path_reservation_id"` + RefCiWorkflowId int `json:"referenceCiWorkflowId"` + ParentCiWorkflowId int `json:"parent_ci_workflow_id"` + ImagePathReservationIds []int `json:"image_path_reservation_ids"` } type GitCommit struct { diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index 9cacd9cb93..773722d675 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -217,7 +217,7 @@ func (impl WebhookServiceImpl) HandleCiSuccessEvent(ciPipelineId int, request *C pluginArtifact := &repository.CiArtifact{ Image: image, ImageDigest: request.ImageDigest, - MaterialInfo: "", + MaterialInfo: string(materialJson), DataSource: request.PluginArtifactStage, ComponentId: pipeline.Id, PipelineId: pipeline.Id, diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index ac21e71635..9b2e379080 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -644,6 +644,7 @@ func (impl *WorkflowDagExecutorImpl) SavePluginArtifacts(ciArtifact *repository. 
pluginArtifact := &repository.CiArtifact{ Image: artifact, ImageDigest: ciArtifact.ImageDigest, + MaterialInfo: ciArtifact.MaterialInfo, DataSource: stage, ComponentId: pipelineId, CredentialsSourceType: repository.GLOBAL_CONTAINER_REGISTRY, diff --git a/scripts/sql/186_ci_artifact_refactor.up.sql b/scripts/sql/186_ci_artifact_refactor.up.sql index 50131d6d8e..84e26117ef 100644 --- a/scripts/sql/186_ci_artifact_refactor.up.sql +++ b/scripts/sql/186_ci_artifact_refactor.up.sql @@ -2,4 +2,6 @@ ALTER TABLE ci_artifact ADD COLUMN credentials_source_type VARCHAR(50); ALTER TABLE ci_artifact ADD COLUMN credentials_source_value VARCHAR(50); ALTER TABLE ci_artifact ADD COLUMN component_id integer; -ALTER TABLE ci_artifact ADD COLUMN +ALTER TABLE ci_workflow ADD COLUMN image_reservation_ids integer[]; + +UPDATE ci_workflow set image_reservation_ids=ARRAY[image_path_reservation] \ No newline at end of file From 0324adbfe25d16c09d39f1ff568d519078e4d739 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 3 Nov 2023 17:45:04 +0530 Subject: [PATCH 102/143] code for deleting reserved tags --- .../pubsub/WorkflowStatusUpdateHandler.go | 8 +++++ .../sql/repository/CustomTagRepository.go | 10 ++++++ .../pipelineConfig/CdWorfkflowRepository.go | 1 + pkg/pipeline/CdHandler.go | 17 +++++++-- pkg/pipeline/CiHandler.go | 12 ++++--- pkg/pipeline/CiService.go | 24 ++++++++----- pkg/pipeline/CustomTagService.go | 36 ++++++++++++++----- pkg/pipeline/WebhookService.go | 16 +++------ pkg/pipeline/WorkflowDagExecutor.go | 18 ++++++++-- pkg/pipeline/pipelineStageVariableParser.go | 29 +++++++-------- scripts/sql/186_ci_artifact_refactor.up.sql | 4 ++- 11 files changed, 122 insertions(+), 53 deletions(-) diff --git a/api/router/pubsub/WorkflowStatusUpdateHandler.go b/api/router/pubsub/WorkflowStatusUpdateHandler.go index 0d1bbf9ec0..1a99997499 100644 --- a/api/router/pubsub/WorkflowStatusUpdateHandler.go +++ b/api/router/pubsub/WorkflowStatusUpdateHandler.go @@ -124,6 +124,14 @@ func (impl 
*WorkflowStatusUpdateHandlerImpl) SubscribeCD() error { impl.logger.Errorw("could not get wf runner", "err", err) return } + if wfrStatus == string(v1alpha1.NodeFailed) || wfrStatus == string(v1alpha1.NodeError) { + if len(wfr.ImageReservationIds) > 0 { + err := impl.cdHandler.DeactivateImageReservationPathsOnFailure(wfr.ImageReservationIds) + if err != nil { + impl.logger.Errorw("error in removing image path reservation ") + } + } + } if wfrStatus == string(v1alpha1.NodeSucceeded) || wfrStatus == string(v1alpha1.NodeFailed) || wfrStatus == string(v1alpha1.NodeError) { eventType := util.EventType(0) if wfrStatus == string(v1alpha1.NodeSucceeded) { diff --git a/internal/sql/repository/CustomTagRepository.go b/internal/sql/repository/CustomTagRepository.go index 92c77cf45c..c8b2bf472b 100644 --- a/internal/sql/repository/CustomTagRepository.go +++ b/internal/sql/repository/CustomTagRepository.go @@ -37,6 +37,7 @@ type ImageTagRepository interface { DeactivateImagePathReservation(id int) error FetchActiveCustomTagData(entityKey int, entityValue string) (*CustomTag, error) DeactivateImagePathReservationByImagePaths(tx *pg.Tx, imagePaths []string) error + DeactivateImagePathReservationByImagePathReservationIds(tx *pg.Tx, imagePathReservationIds []int) error } type ImageTagRepositoryImpl struct { @@ -116,3 +117,12 @@ func (impl *ImageTagRepositoryImpl) DeactivateImagePathReservationByImagePaths(t } return nil } + +func (impl *ImageTagRepositoryImpl) DeactivateImagePathReservationByImagePathReservationIds(tx *pg.Tx, imagePathReservationIds []int) error { + query := `UPDATE image_path_reservation set active=false where id in (?)` + _, err := tx.Exec(query, pg.In(imagePathReservationIds)) + if err != nil && err != pg.ErrNoRows { + return err + } + return nil +} diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 37452c377c..8171bacc82 100644 --- 
a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -165,6 +165,7 @@ type CdWorkflowRunner struct { PodName string `sql:"pod_name"` BlobStorageEnabled bool `sql:"blob_storage_enabled,notnull"` RefCdWorkflowRunnerId int `sql:"ref_cd_workflow_runner_id,notnull"` + ImageReservationIds []int `sql:"image_reservation_ids"` CdWorkflow *CdWorkflow sql.AuditLog } diff --git a/pkg/pipeline/CdHandler.go b/pkg/pipeline/CdHandler.go index ad22659614..30930c8c43 100644 --- a/pkg/pipeline/CdHandler.go +++ b/pkg/pipeline/CdHandler.go @@ -83,6 +83,7 @@ type CdHandler interface { CheckAndSendArgoPipelineStatusSyncEventIfNeeded(pipelineId int, userId int32, isAppStoreApplication bool) FetchAppWorkflowStatusForTriggerViewForEnvironment(request resourceGroup2.ResourceGroupingRequest) ([]*pipelineConfig.CdWorkflowStatus, error) FetchAppDeploymentStatusForEnvironments(request resourceGroup2.ResourceGroupingRequest) ([]*pipelineConfig.AppDeploymentStatus, error) + DeactivateImageReservationPathsOnFailure(imagePathReservationIds []int) error } type CdHandlerImpl struct { @@ -119,9 +120,10 @@ type CdHandlerImpl struct { k8sUtil *k8s.K8sUtil workflowService WorkflowService config *CdConfig + customTagService CustomTagService } -func NewCdHandlerImpl(Logger *zap.SugaredLogger, userService user.UserService, cdWorkflowRepository pipelineConfig.CdWorkflowRepository, ciLogService CiLogService, ciArtifactRepository repository.CiArtifactRepository, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, pipelineRepository pipelineConfig.PipelineRepository, envRepository repository2.EnvironmentRepository, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, helmAppService client.HelmAppService, pipelineOverrideRepository chartConfig.PipelineOverrideRepository, workflowDagExecutor WorkflowDagExecutor, appListingService app.AppListingService, appListingRepository 
repository.AppListingRepository, pipelineStatusTimelineRepository pipelineConfig.PipelineStatusTimelineRepository, application application.ServiceClient, argoUserService argo.ArgoUserService, deploymentEventHandler app.DeploymentEventHandler, eventClient client2.EventClient, pipelineStatusTimelineResourcesService status.PipelineStatusTimelineResourcesService, pipelineStatusSyncDetailService status.PipelineStatusSyncDetailService, pipelineStatusTimelineService status.PipelineStatusTimelineService, appService app.AppService, appStatusService app_status.AppStatusService, enforcerUtil rbac.EnforcerUtil, installedAppRepository repository3.InstalledAppRepository, installedAppVersionHistoryRepository repository3.InstalledAppVersionHistoryRepository, appRepository app2.AppRepository, resourceGroupService resourceGroup2.ResourceGroupService, imageTaggingService ImageTaggingService, k8sUtil *k8s.K8sUtil, workflowService WorkflowService) *CdHandlerImpl { +func NewCdHandlerImpl(Logger *zap.SugaredLogger, userService user.UserService, cdWorkflowRepository pipelineConfig.CdWorkflowRepository, ciLogService CiLogService, ciArtifactRepository repository.CiArtifactRepository, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, pipelineRepository pipelineConfig.PipelineRepository, envRepository repository2.EnvironmentRepository, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, helmAppService client.HelmAppService, pipelineOverrideRepository chartConfig.PipelineOverrideRepository, workflowDagExecutor WorkflowDagExecutor, appListingService app.AppListingService, appListingRepository repository.AppListingRepository, pipelineStatusTimelineRepository pipelineConfig.PipelineStatusTimelineRepository, application application.ServiceClient, argoUserService argo.ArgoUserService, deploymentEventHandler app.DeploymentEventHandler, eventClient client2.EventClient, pipelineStatusTimelineResourcesService status.PipelineStatusTimelineResourcesService, 
pipelineStatusSyncDetailService status.PipelineStatusSyncDetailService, pipelineStatusTimelineService status.PipelineStatusTimelineService, appService app.AppService, appStatusService app_status.AppStatusService, enforcerUtil rbac.EnforcerUtil, installedAppRepository repository3.InstalledAppRepository, installedAppVersionHistoryRepository repository3.InstalledAppVersionHistoryRepository, appRepository app2.AppRepository, resourceGroupService resourceGroup2.ResourceGroupService, imageTaggingService ImageTaggingService, k8sUtil *k8s.K8sUtil, workflowService WorkflowService, customTagService CustomTagService) *CdHandlerImpl { cdh := &CdHandlerImpl{ Logger: Logger, userService: userService, @@ -155,6 +157,7 @@ func NewCdHandlerImpl(Logger *zap.SugaredLogger, userService user.UserService, c imageTaggingService: imageTaggingService, k8sUtil: k8sUtil, workflowService: workflowService, + customTagService: customTagService, } config, err := GetCdConfig() if err != nil { @@ -615,7 +618,13 @@ func (impl *CdHandlerImpl) CancelStage(workflowRunnerId int, userId int32) (int, impl.Logger.Error("cannot terminate wf runner", "err", err) return 0, err } - + if len(workflowRunner.ImageReservationIds) > 0 { + err := impl.customTagService.DeactivateImagePathReservationByImageIds(workflowRunner.ImageReservationIds) + if err != nil { + impl.Logger.Errorw("error in deactivating image path reservation ids", "err", err) + return 0, err + } + } workflowRunner.Status = WorkflowCancel workflowRunner.UpdatedOn = time.Now() workflowRunner.UpdatedBy = userId @@ -1576,3 +1585,7 @@ func (impl *CdHandlerImpl) FetchAppDeploymentStatusForEnvironments(request resou return deploymentStatuses, err } + +func (impl *CdHandlerImpl) DeactivateImageReservationPathsOnFailure(imagePathReservationIds []int) error { + return impl.customTagService.DeactivateImagePathReservationByImageIds(imagePathReservationIds) +} diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index 385036df79..0cc0165cad 
100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -688,11 +688,13 @@ func (impl *CiHandlerImpl) CancelBuild(workflowId int) (int, error) { impl.Logger.Errorw("cannot update deleted workflow status, but wf deleted", "err", err) return 0, err } - imagePathReservationId := workflow.ImagePathReservationId - err = impl.customTagService.DeactivateImagePathReservation(imagePathReservationId) - if err != nil { - impl.Logger.Errorw("error in marking image tag unreserved", "err", err) - return 0, err + imagePathReservationIds := workflow.ImagePathReservationIds + if len(imagePathReservationIds) > 0 { + err = impl.customTagService.DeactivateImagePathReservationByImageIds(imagePathReservationIds) + if err != nil { + impl.Logger.Errorw("error in marking image tag unreserved", "err", err) + return 0, err + } } return workflow.Id, nil } diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index a2d203cfa1..ab826b126e 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -481,15 +481,20 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
} return nil, err } - savedWf.ImagePathReservationId = imagePathReservation.Id + savedWf.ImagePathReservationIds = []int{imagePathReservation.Id} //imagePath = docker.io/avd0/dashboard:fd23414b dockerImageTag = strings.Split(imagePathReservation.ImagePath, ":")[1] } else { dockerImageTag = impl.buildImageTag(commitHashes, pipeline.Id, savedWf.Id) } - registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, err := impl.GetEnvVariablesForSkopeoPlugin( + + // skopeo plugin specific logic + registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imageReservationIds, err := impl.GetWorkflowRequestVariablesForSkopeoPlugin( preCiSteps, postCiSteps, dockerImageTag, customTag.Id, fmt.Sprintf(bean2.ImagePathPattern, pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository, dockerImageTag), pipeline.CiTemplate.DockerRegistry.Id) + savedWf.ImagePathReservationIds = append(savedWf.ImagePathReservationIds, imageReservationIds...) + // skopeo plugin logic ends + if err != nil { impl.Logger.Errorw("error in getting env variables for skopeo plugin") return nil, err @@ -723,22 +728,23 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
return workflowRequest, nil } -func (impl *CiServiceImpl) GetEnvVariablesForSkopeoPlugin(preCiSteps []*bean2.StepObject, postCiSteps []*bean2.StepObject, customTag string, customTagId int, buildImagePath string, buildImagedockerRegistryId string) (map[string][]string, map[string]plugin.RegistryCredentials, string, error) { +func (impl *CiServiceImpl) GetWorkflowRequestVariablesForSkopeoPlugin(preCiSteps []*bean2.StepObject, postCiSteps []*bean2.StepObject, customTag string, customTagId int, buildImagePath string, buildImagedockerRegistryId string) (map[string][]string, map[string]plugin.RegistryCredentials, string, []int, error) { var registryDestinationImageMap map[string][]string var registryCredentialMap map[string]plugin.RegistryCredentials var pluginArtifactStage string + var imagePathReservationIds []int skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) if err != nil && err != pg.ErrNoRows { impl.Logger.Errorw("error in getting skopeo plugin id", "err", err) - return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, err + return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, err } for _, step := range preCiSteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, customTagId, buildImagePath, buildImagedockerRegistryId) + registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, err = impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, customTagId, buildImagePath, buildImagedockerRegistryId) if err != nil { impl.Logger.Errorw("error in parsing skopeo input variable", "err", err) - return registryDestinationImageMap, 
registryCredentialMap, pluginArtifactStage, err + return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, err } pluginArtifactStage = repository5.PRE_CI } @@ -746,15 +752,15 @@ func (impl *CiServiceImpl) GetEnvVariablesForSkopeoPlugin(preCiSteps []*bean2.St for _, step := range postCiSteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, customTagId, buildImagePath, buildImagedockerRegistryId) + registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, err = impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, customTagId, buildImagePath, buildImagedockerRegistryId) if err != nil { impl.Logger.Errorw("error in parsing skopeo input variable", "err", err) - return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, err + return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, err } pluginArtifactStage = repository5.POST_CI } } - return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, nil + return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, nil } func buildCiStepsDataFromDockerBuildScripts(dockerBuildScripts []*bean.CiScript) []*bean2.StepObject { diff --git a/pkg/pipeline/CustomTagService.go b/pkg/pipeline/CustomTagService.go index 02d08b61e4..4d50f07d99 100644 --- a/pkg/pipeline/CustomTagService.go +++ b/pkg/pipeline/CustomTagService.go @@ -20,8 +20,9 @@ type CustomTagService interface { DeleteCustomTagIfExists(tag bean.CustomTag) error DeactivateImagePathReservation(id int) error GetCustomTag(entityKey int, entityValue string) (*repository.CustomTag, 
string, error) - ReserveImagePath(imagePath string, customTagId int) error + ReserveImagePath(imagePath string, customTagId int) (*repository.ImagePathReservation, error) DeactivateImagePathReservationByImagePath(imagePaths []string) error + DeactivateImagePathReservationByImageIds(imagePathReservationIds []int) error } type CustomTagServiceImpl struct { @@ -197,18 +198,18 @@ func (impl *CustomTagServiceImpl) GetCustomTag(entityKey int, entityValue string } -func (impl *CustomTagServiceImpl) ReserveImagePath(imagePath string, customTagId int) error { +func (impl *CustomTagServiceImpl) ReserveImagePath(imagePath string, customTagId int) (*repository.ImagePathReservation, error) { connection := impl.customTagRepository.GetConnection() tx, err := connection.Begin() if err != nil { - return nil + return nil, err } imagePathReservations, err := impl.customTagRepository.FindByImagePath(tx, imagePath) if err != nil && err != pg.ErrNoRows { - return nil + return nil, err } if len(imagePathReservations) > 0 { - return nil + return nil, nil } imagePathReservation := &repository.ImagePathReservation{ ImagePath: imagePath, @@ -216,14 +217,14 @@ func (impl *CustomTagServiceImpl) ReserveImagePath(imagePath string, customTagId } err = impl.customTagRepository.InsertImagePath(tx, imagePathReservation) if err != nil { - return nil + return imagePathReservation, err } err = tx.Commit() if err != nil { impl.Logger.Errorw("Error in fetching custom tag", "err", err) - return err + return imagePathReservation, err } - return err + return imagePathReservation, err } func (impl *CustomTagServiceImpl) DeactivateImagePathReservationByImagePath(imagePaths []string) error { @@ -244,3 +245,22 @@ func (impl *CustomTagServiceImpl) DeactivateImagePathReservationByImagePath(imag } return nil } + +func (impl *CustomTagServiceImpl) DeactivateImagePathReservationByImageIds(imagePathReservationIds []int) error { + connection := impl.customTagRepository.GetConnection() + tx, err := connection.Begin() + 
if err != nil { + return nil + } + err = impl.customTagRepository.DeactivateImagePathReservationByImagePathReservationIds(tx, imagePathReservationIds) + if err != nil { + impl.Logger.Errorw("error in marking image path unreserved") + return err + } + err = tx.Commit() + if err != nil { + impl.Logger.Errorw("Error in fetching custom tag", "err", err) + return err + } + return nil +} diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index 773722d675..9c62ac8c23 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -147,20 +147,12 @@ func (impl WebhookServiceImpl) HandleCiStepFailedEvent(ciPipelineId int, request } go func() { - //TODO: Ayush - remove plugin images from reservation table - err := impl.customTagService.DeactivateImagePathReservation(savedWorkflow.ImagePathReservationId) - if err != nil { - impl.logger.Errorw("unable to deactivate impage_path_reservation ", err) - } - for _, images := range request.PluginRegistryArtifactDetails { - if len(images) > 0 { - err = impl.customTagService.DeactivateImagePathReservationByImagePath(images) - if err != nil { - impl.logger.Errorw("unable to deactivate impage_path_reservation ", err) - } + if len(savedWorkflow.ImagePathReservationIds) > 0 { + err = impl.customTagService.DeactivateImagePathReservationByImageIds(savedWorkflow.ImagePathReservationIds) + if err != nil { + impl.logger.Errorw("unable to deactivate impage_path_reservation ", err) } } - }() go impl.WriteCIStepFailedEvent(pipeline, request, savedWorkflow) diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 9b2e379080..988ec3ab2d 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -778,16 +778,22 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * if customTag != nil { customTagId = customTag.Id } - registryDestinationImageMap, registryCredentialMap, err := 
impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, dockerImageTag, customTagId, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) + registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, dockerImageTag, customTagId, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err } + runner.ImageReservationIds = imagePathReservationIds + err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) + if err != nil { + impl.logger.Errorw("error in updating image path reservation ") + } cdStageWorkflowRequest.RegistryDestinationImageMap = registryDestinationImageMap cdStageWorkflowRequest.RegistryCredentialMap = registryCredentialMap cdStageWorkflowRequest.PluginArtifactStage = repository.PRE_CD } } + _, span = otel.Tracer("orchestrator").Start(ctx, "cdWorkflowService.SubmitWorkflow") cdStageWorkflowRequest.Pipeline = pipeline cdStageWorkflowRequest.Env = env @@ -913,6 +919,8 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor impl.logger.Errorw("error in getting skopeo plugin id", "err", err) return err } + + var pluginImagePathReservationIds []int for _, step := range cdStageWorkflowRequest.PostCiSteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table @@ -925,11 +933,12 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor if customTag == nil { customTagId = customTag.Id } - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, dockerImageTag, customTagId, cdStageWorkflowRequest.CiArtifactDTO.Image, 
cdStageWorkflowRequest.DockerRegistryId) + registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, dockerImageTag, customTagId, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err } + pluginImagePathReservationIds = imagePathReservationIds cdStageWorkflowRequest.RegistryDestinationImageMap = registryDestinationImageMap cdStageWorkflowRequest.RegistryCredentialMap = registryCredentialMap cdStageWorkflowRequest.PluginArtifactStage = repository.POST_CD @@ -946,6 +955,11 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor impl.logger.Errorw("error in getting wfr by workflowId and runnerType", "err", err, "wfId", cdWf.Id) return err } + wfr.ImageReservationIds = pluginImagePathReservationIds + err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(&wfr) + if err != nil { + impl.logger.Error("error in updating image path reservation ids in cd workflow runner", "err", "err") + } event := impl.eventFactory.Build(util2.Trigger, &pipeline.Id, pipeline.AppId, &pipeline.EnvironmentId, util2.CD) impl.logger.Debugw("event Cd Post Trigger", "event", event) diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index fdedb79ec7..19d066cf42 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -23,7 +23,7 @@ const ( ) type PluginInputVariableParser interface { - ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, customTag string, customTagId int, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, error) + ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, customTag string, customTagId int, 
pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, []int, error) } type PluginInputVariableParserImpl struct { @@ -44,7 +44,7 @@ func NewPluginInputVariableParserImpl( } } -func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, dockerImageTag string, customTagId int, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, error) { +func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, dockerImageTag string, customTagId int, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, []int, error) { var DestinationInfo, SourceRegistry, SourceImage string for _, ipVariable := range inputVariables { if ipVariable.Name == DESTINATION_INFO { @@ -53,7 +53,7 @@ func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(input if len(pluginTriggerImage) == 0 { if len(ipVariable.Value) == 0 { impl.logger.Errorw("No image provided in source or during trigger time") - return nil, nil, errors.New("no image provided in source or during trigger time") + return nil, nil, nil, errors.New("no image provided in source or during trigger time") } SourceInfo := ipVariable.Value SourceInfoSplit := strings.Split(SourceInfo, "|") @@ -65,18 +65,18 @@ func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(input } } } - registryDestinationImageMap, registryCredentialMap, err := impl.getRegistryDetailsAndDestinationImagePathForSkopeo(dockerImageTag, customTagId, SourceImage, SourceRegistry, DestinationInfo) + registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, err := impl.getRegistryDetailsAndDestinationImagePathForSkopeo(dockerImageTag, customTagId, SourceImage, SourceRegistry, DestinationInfo) if err != nil { 
impl.logger.Errorw("Error in parsing skopeo input variables") - return nil, nil, err + return nil, nil, nil, err } - return registryDestinationImageMap, registryCredentialMap, nil + return registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, nil } -func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImagePathForSkopeo(dockerImageTag string, tagId int, sourceImage string, sourceRegistry string, destinationInfo string) (registryDestinationImageMap map[string][]string, registryCredentialsMap map[string]plugin.RegistryCredentials, err error) { +func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImagePathForSkopeo(dockerImageTag string, tagId int, sourceImage string, sourceRegistry string, destinationInfo string) (registryDestinationImageMap map[string][]string, registryCredentialsMap map[string]plugin.RegistryCredentials, imagePathReservationIds []int, err error) { registryDestinationImageMap = make(map[string][]string) registryCredentialsMap = make(map[string]plugin.RegistryCredentials) - + imagePathReservationIds = make([]int, 0) if len(dockerImageTag) == 0 { sourceSplit := strings.Split(sourceImage, ":") dockerImageTag = sourceSplit[len(sourceSplit)-1] @@ -85,7 +85,7 @@ func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImage registryCredentials, err := impl.dockerRegistryConfig.FetchOneDockerAccount(sourceRegistry) if err != nil { impl.logger.Errorw("error in fetching registry details by registry name", "err", err) - return registryDestinationImageMap, registryCredentialsMap, err + return registryDestinationImageMap, registryCredentialsMap, imagePathReservationIds, err } registryCredentialsMap["SOURCE_REGISTRY_CREDENTIAL"] = plugin.RegistryCredentials{ RegistryType: string(registryCredentials.RegistryType), @@ -105,9 +105,9 @@ func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImage if err != nil { impl.logger.Errorw("error in fetching registry 
details by registry name", "err", err) if err == pg.ErrNoRows { - return registryDestinationImageMap, registryCredentialsMap, fmt.Errorf("invalid registry name: registry details not found in global container registries") + return registryDestinationImageMap, registryCredentialsMap, imagePathReservationIds, fmt.Errorf("invalid registry name: registry details not found in global container registries") } - return registryDestinationImageMap, registryCredentialsMap, err + return registryDestinationImageMap, registryCredentialsMap, imagePathReservationIds, err } var destinationImages []string destinationRepositoryValues := registryRepoSplit[1] @@ -117,11 +117,12 @@ func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImage repositoryName = strings.Trim(repositoryName, " ") destinationImage := fmt.Sprintf("%s/%s:%s", registryCredentials.RegistryURL, repositoryName, dockerImageTag) destinationImages = append(destinationImages, destinationImage) - err = impl.customTagService.ReserveImagePath(destinationImage, tagId) + imagePathReservationData, err := impl.customTagService.ReserveImagePath(destinationImage, tagId) if err != nil { impl.logger.Errorw("Error in marking custom tag reserved", "err", err) - return registryDestinationImageMap, registryCredentialsMap, err + return registryDestinationImageMap, registryCredentialsMap, imagePathReservationIds, err } + imagePathReservationIds = append(imagePathReservationIds, imagePathReservationData.Id) } registryDestinationImageMap[registryName] = destinationImages registryCredentialsMap[registryName] = plugin.RegistryCredentials{ @@ -135,5 +136,5 @@ func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImage } } //adding source registry details - return registryDestinationImageMap, registryCredentialsMap, nil + return registryDestinationImageMap, registryCredentialsMap, imagePathReservationIds, nil } diff --git a/scripts/sql/186_ci_artifact_refactor.up.sql 
b/scripts/sql/186_ci_artifact_refactor.up.sql index 84e26117ef..cb8e60449a 100644 --- a/scripts/sql/186_ci_artifact_refactor.up.sql +++ b/scripts/sql/186_ci_artifact_refactor.up.sql @@ -4,4 +4,6 @@ ALTER TABLE ci_artifact ADD COLUMN component_id integer; ALTER TABLE ci_workflow ADD COLUMN image_reservation_ids integer[]; -UPDATE ci_workflow set image_reservation_ids=ARRAY[image_path_reservation] \ No newline at end of file +UPDATE ci_workflow set image_reservation_ids=ARRAY[image_path_reservation] + +ALTER TABLE ci_workflow_runner ADD COLUMN image_reservation_ids integer[]; \ No newline at end of file From 2bf3ede8b1dadf4128b774810a1f42f412f7b13e Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 3 Nov 2023 18:26:08 +0530 Subject: [PATCH 103/143] fix sql object --- .../pubsub/WorkflowStatusUpdateHandler.go | 4 +- .../pipelineConfig/CdWorfkflowRepository.go | 38 +++++++++---------- .../pipelineConfig/CiWorkflowRepository.go | 2 +- pkg/pipeline/CdHandler.go | 4 +- pkg/pipeline/WorkflowDagExecutor.go | 4 +- scripts/sql/186_ci_artifact_refactor.up.sql | 6 +-- wire_gen.go | 2 +- 7 files changed, 30 insertions(+), 30 deletions(-) diff --git a/api/router/pubsub/WorkflowStatusUpdateHandler.go b/api/router/pubsub/WorkflowStatusUpdateHandler.go index 1a99997499..592e6e572c 100644 --- a/api/router/pubsub/WorkflowStatusUpdateHandler.go +++ b/api/router/pubsub/WorkflowStatusUpdateHandler.go @@ -125,8 +125,8 @@ func (impl *WorkflowStatusUpdateHandlerImpl) SubscribeCD() error { return } if wfrStatus == string(v1alpha1.NodeFailed) || wfrStatus == string(v1alpha1.NodeError) { - if len(wfr.ImageReservationIds) > 0 { - err := impl.cdHandler.DeactivateImageReservationPathsOnFailure(wfr.ImageReservationIds) + if len(wfr.ImagePathReservationIds) > 0 { + err := impl.cdHandler.DeactivateImageReservationPathsOnFailure(wfr.ImagePathReservationIds) if err != nil { impl.logger.Errorw("error in removing image path reservation ") } diff --git 
a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 8171bacc82..ad79d2b8d8 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -148,25 +148,25 @@ type CdWorkflowRunnerWithExtraFields struct { } type CdWorkflowRunner struct { - tableName struct{} `sql:"cd_workflow_runner" pg:",discard_unknown_columns"` - Id int `sql:"id,pk"` - Name string `sql:"name"` - WorkflowType bean.WorkflowType `sql:"workflow_type"` //pre,post,deploy - ExecutorType WorkflowExecutorType `sql:"executor_type"` //awf, system - Status string `sql:"status"` - PodStatus string `sql:"pod_status"` - Message string `sql:"message"` - StartedOn time.Time `sql:"started_on"` - FinishedOn time.Time `sql:"finished_on"` - Namespace string `sql:"namespace"` - LogLocation string `sql:"log_file_path"` - TriggeredBy int32 `sql:"triggered_by"` - CdWorkflowId int `sql:"cd_workflow_id"` - PodName string `sql:"pod_name"` - BlobStorageEnabled bool `sql:"blob_storage_enabled,notnull"` - RefCdWorkflowRunnerId int `sql:"ref_cd_workflow_runner_id,notnull"` - ImageReservationIds []int `sql:"image_reservation_ids"` - CdWorkflow *CdWorkflow + tableName struct{} `sql:"cd_workflow_runner" pg:",discard_unknown_columns"` + Id int `sql:"id,pk"` + Name string `sql:"name"` + WorkflowType bean.WorkflowType `sql:"workflow_type"` //pre,post,deploy + ExecutorType WorkflowExecutorType `sql:"executor_type"` //awf, system + Status string `sql:"status"` + PodStatus string `sql:"pod_status"` + Message string `sql:"message"` + StartedOn time.Time `sql:"started_on"` + FinishedOn time.Time `sql:"finished_on"` + Namespace string `sql:"namespace"` + LogLocation string `sql:"log_file_path"` + TriggeredBy int32 `sql:"triggered_by"` + CdWorkflowId int `sql:"cd_workflow_id"` + PodName string `sql:"pod_name"` + BlobStorageEnabled bool `sql:"blob_storage_enabled,notnull"` 
+ RefCdWorkflowRunnerId int `sql:"ref_cd_workflow_runner_id,notnull"` + ImagePathReservationIds []int `sql:"image_path_reservation_ids" pg:",array"` + CdWorkflow *CdWorkflow sql.AuditLog } diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index 776640ac01..0b77fb86f9 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -77,7 +77,7 @@ type CiWorkflow struct { ImagePathReservationId int `sql:"image_path_reservation_id"` ReferenceCiWorkflowId int `sql:"ref_ci_workflow_id"` ParentCiWorkFlowId int `sql:"parent_ci_workflow_id"` - ImagePathReservationIds []int `json:"image_path_reservation_ids"` + ImagePathReservationIds []int `sql:"image_path_reservation_ids" pg:",array"` CiPipeline *CiPipeline } diff --git a/pkg/pipeline/CdHandler.go b/pkg/pipeline/CdHandler.go index 30930c8c43..1333d9ce95 100644 --- a/pkg/pipeline/CdHandler.go +++ b/pkg/pipeline/CdHandler.go @@ -618,8 +618,8 @@ func (impl *CdHandlerImpl) CancelStage(workflowRunnerId int, userId int32) (int, impl.Logger.Error("cannot terminate wf runner", "err", err) return 0, err } - if len(workflowRunner.ImageReservationIds) > 0 { - err := impl.customTagService.DeactivateImagePathReservationByImageIds(workflowRunner.ImageReservationIds) + if len(workflowRunner.ImagePathReservationIds) > 0 { + err := impl.customTagService.DeactivateImagePathReservationByImageIds(workflowRunner.ImagePathReservationIds) if err != nil { impl.Logger.Errorw("error in deactivating image path reservation ids", "err", err) return 0, err diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 988ec3ab2d..b627c384ee 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -783,7 +783,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * 
impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err } - runner.ImageReservationIds = imagePathReservationIds + runner.ImagePathReservationIds = imagePathReservationIds err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) if err != nil { impl.logger.Errorw("error in updating image path reservation ") @@ -955,7 +955,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor impl.logger.Errorw("error in getting wfr by workflowId and runnerType", "err", err, "wfId", cdWf.Id) return err } - wfr.ImageReservationIds = pluginImagePathReservationIds + wfr.ImagePathReservationIds = pluginImagePathReservationIds err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(&wfr) if err != nil { impl.logger.Error("error in updating image path reservation ids in cd workflow runner", "err", "err") diff --git a/scripts/sql/186_ci_artifact_refactor.up.sql b/scripts/sql/186_ci_artifact_refactor.up.sql index cb8e60449a..12c59267cd 100644 --- a/scripts/sql/186_ci_artifact_refactor.up.sql +++ b/scripts/sql/186_ci_artifact_refactor.up.sql @@ -2,8 +2,8 @@ ALTER TABLE ci_artifact ADD COLUMN credentials_source_type VARCHAR(50); ALTER TABLE ci_artifact ADD COLUMN credentials_source_value VARCHAR(50); ALTER TABLE ci_artifact ADD COLUMN component_id integer; -ALTER TABLE ci_workflow ADD COLUMN image_reservation_ids integer[]; +ALTER TABLE ci_workflow ADD COLUMN image_path_reservation_ids integer[]; -UPDATE ci_workflow set image_reservation_ids=ARRAY[image_path_reservation] +UPDATE ci_workflow set image_path_reservation_ids=ARRAY["image_path_reservation_id"] where image_path_reservation_id is not NULL; -ALTER TABLE ci_workflow_runner ADD COLUMN image_reservation_ids integer[]; \ No newline at end of file +ALTER TABLE ci_workflow_runner ADD COLUMN image_path_reservation_ids integer[]; \ No newline at end of file diff --git a/wire_gen.go b/wire_gen.go index d7a2246978..d6be5d4115 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -509,7 
+509,7 @@ func InitializeApp() (*App, error) { linkoutsRepositoryImpl := repository.NewLinkoutsRepositoryImpl(sugaredLogger, db) appListingServiceImpl := app2.NewAppListingServiceImpl(sugaredLogger, appListingRepositoryImpl, applicationServiceClientImpl, appRepositoryImpl, appListingViewBuilderImpl, pipelineRepositoryImpl, linkoutsRepositoryImpl, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, environmentRepositoryImpl, argoUserServiceImpl, envConfigOverrideRepositoryImpl, chartRepositoryImpl, ciPipelineRepositoryImpl, dockerRegistryIpsConfigServiceImpl) deploymentEventHandlerImpl := app2.NewDeploymentEventHandlerImpl(sugaredLogger, appListingServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl) - cdHandlerImpl := pipeline.NewCdHandlerImpl(sugaredLogger, userServiceImpl, cdWorkflowRepositoryImpl, ciLogServiceImpl, ciArtifactRepositoryImpl, ciPipelineMaterialRepositoryImpl, pipelineRepositoryImpl, environmentRepositoryImpl, ciWorkflowRepositoryImpl, helmAppServiceImpl, pipelineOverrideRepositoryImpl, workflowDagExecutorImpl, appListingServiceImpl, appListingRepositoryImpl, pipelineStatusTimelineRepositoryImpl, applicationServiceClientImpl, argoUserServiceImpl, deploymentEventHandlerImpl, eventRESTClientImpl, pipelineStatusTimelineResourcesServiceImpl, pipelineStatusSyncDetailServiceImpl, pipelineStatusTimelineServiceImpl, appServiceImpl, appStatusServiceImpl, enforcerUtilImpl, installedAppRepositoryImpl, installedAppVersionHistoryRepositoryImpl, appRepositoryImpl, resourceGroupServiceImpl, imageTaggingServiceImpl, k8sUtil, workflowServiceImpl) + cdHandlerImpl := pipeline.NewCdHandlerImpl(sugaredLogger, userServiceImpl, cdWorkflowRepositoryImpl, ciLogServiceImpl, ciArtifactRepositoryImpl, ciPipelineMaterialRepositoryImpl, pipelineRepositoryImpl, environmentRepositoryImpl, ciWorkflowRepositoryImpl, helmAppServiceImpl, pipelineOverrideRepositoryImpl, workflowDagExecutorImpl, 
appListingServiceImpl, appListingRepositoryImpl, pipelineStatusTimelineRepositoryImpl, applicationServiceClientImpl, argoUserServiceImpl, deploymentEventHandlerImpl, eventRESTClientImpl, pipelineStatusTimelineResourcesServiceImpl, pipelineStatusSyncDetailServiceImpl, pipelineStatusTimelineServiceImpl, appServiceImpl, appStatusServiceImpl, enforcerUtilImpl, installedAppRepositoryImpl, installedAppVersionHistoryRepositoryImpl, appRepositoryImpl, resourceGroupServiceImpl, imageTaggingServiceImpl, k8sUtil, workflowServiceImpl, customTagServiceImpl) appWorkflowServiceImpl := appWorkflow2.NewAppWorkflowServiceImpl(sugaredLogger, appWorkflowRepositoryImpl, ciCdPipelineOrchestratorImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, enforcerUtilImpl, resourceGroupServiceImpl) appCloneServiceImpl := appClone.NewAppCloneServiceImpl(sugaredLogger, pipelineBuilderImpl, materialRepositoryImpl, chartServiceImpl, configMapServiceImpl, appWorkflowServiceImpl, appListingServiceImpl, propertiesConfigServiceImpl, ciTemplateOverrideRepositoryImpl, pipelineStageServiceImpl, ciTemplateServiceImpl, appRepositoryImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, appWorkflowRepositoryImpl) deploymentTemplateRepositoryImpl := repository.NewDeploymentTemplateRepositoryImpl(db, sugaredLogger) From a34e214fab0529045b44ef21483eeffda6edd503 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 3 Nov 2023 18:40:57 +0530 Subject: [PATCH 104/143] adding array field in sql object --- internal/sql/repository/pipelineConfig/CiWorkflowRepository.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index 0b77fb86f9..8e0c0316cb 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -107,7 +107,7 @@ type WorkflowWithArtifact struct { ImagePathReservationId int 
`json:"image_path_reservation_id"` RefCiWorkflowId int `json:"referenceCiWorkflowId"` ParentCiWorkflowId int `json:"parent_ci_workflow_id"` - ImagePathReservationIds []int `json:"image_path_reservation_ids"` + ImagePathReservationIds []int `json:"image_path_reservation_ids" pg:",array"` } type GitCommit struct { From 9bccb07e69b8a202670ca4123f8b196eb17e460e Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 3 Nov 2023 19:23:52 +0530 Subject: [PATCH 105/143] fix: custom tag enabled --- pkg/pipeline/CiCdPipelineOrchestrator.go | 2 +- pkg/pipeline/WebhookService.go | 3 +-- scripts/sql/186_ci_artifact_refactor.up.sql | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index 7fa1ce2c20..34c61c70c8 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -336,7 +336,7 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. 
//If customTagObject has been passed, create or update the resource //Otherwise deleteIfExists - if createRequest.CustomTagObject != nil { + if len(createRequest.CustomTagObject.TagPattern) > 0 { customTag := bean4.CustomTag{ EntityKey: bean2.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipelineObject.Id), diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index 9c62ac8c23..81afedd722 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -283,11 +283,10 @@ func (impl WebhookServiceImpl) HandleCiSuccessEvent(ciPipelineId int, request *C } if len(pluginArtifacts) == 0 { ciArtifactArr = append(ciArtifactArr, buildArtifact) - go impl.WriteCISuccessEvent(request, pipeline, buildArtifact) } else { ciArtifactArr = append(ciArtifactArr, pluginArtifacts[0]) } - + go impl.WriteCISuccessEvent(request, pipeline, buildArtifact) isCiManual := true if request.UserId == 1 { impl.logger.Debugw("Trigger (auto) by system user", "userId", request.UserId) diff --git a/scripts/sql/186_ci_artifact_refactor.up.sql b/scripts/sql/186_ci_artifact_refactor.up.sql index 12c59267cd..455836e697 100644 --- a/scripts/sql/186_ci_artifact_refactor.up.sql +++ b/scripts/sql/186_ci_artifact_refactor.up.sql @@ -6,4 +6,4 @@ ALTER TABLE ci_workflow ADD COLUMN image_path_reservation_ids integer[]; UPDATE ci_workflow set image_path_reservation_ids=ARRAY["image_path_reservation_id"] where image_path_reservation_id is not NULL; -ALTER TABLE ci_workflow_runner ADD COLUMN image_path_reservation_ids integer[]; \ No newline at end of file +ALTER TABLE cd_workflow_runner ADD COLUMN image_path_reservation_ids integer[]; \ No newline at end of file From 24441307c7287f38ca4e40be1295e75e2aabf27f Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Sun, 5 Nov 2023 13:54:31 +0530 Subject: [PATCH 106/143] fix custom tag used even if disabled --- internal/sql/repository/CustomTagRepository.go | 2 +- pkg/pipeline/CiService.go | 2 +- 
pkg/pipeline/WorkflowDagExecutor.go | 16 +++++++++++----- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/internal/sql/repository/CustomTagRepository.go b/internal/sql/repository/CustomTagRepository.go index c8b2bf472b..57bcbb2293 100644 --- a/internal/sql/repository/CustomTagRepository.go +++ b/internal/sql/repository/CustomTagRepository.go @@ -14,7 +14,7 @@ type CustomTag struct { AutoIncreasingNumber int `sql:"auto_increasing_number, notnull"` Active bool `sql:"active"` Metadata string `sql:"metadata"` - Enabled bool `sql:"enabled"` + Enabled bool `sql:"enabled, notnull"` } type ImagePathReservation struct { diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 95619d7452..7b3dc9d2d9 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -465,7 +465,7 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. if err != nil && err != pg.ErrNoRows { return nil, err } - if customTag.Id != 0 { + if customTag.Id != 0 && customTag.Enabled == true { imagePathReservation, err := impl.customTagService.GenerateImagePath(bean2.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id), pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository) if err != nil { if errors.Is(err, bean2.ErrImagePathInUse) { diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index ca98b8bd65..f77d9c6e5d 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -41,8 +41,8 @@ import ( "github.com/devtron-labs/devtron/pkg/k8s" bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" repository4 "github.com/devtron-labs/devtron/pkg/pipeline/repository" - "github.com/devtron-labs/devtron/pkg/plugin" "github.com/devtron-labs/devtron/pkg/pipeline/types" + "github.com/devtron-labs/devtron/pkg/plugin" 
"github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/variables" "github.com/devtron-labs/devtron/pkg/variables/parsers" @@ -141,9 +141,9 @@ type WorkflowDagExecutorImpl struct { pipelineStageRepository repository4.PipelineStageRepository pipelineStageService PipelineStageService config *types.CdConfig - globalPluginService plugin.GlobalPluginService + globalPluginService plugin.GlobalPluginService - scopedVariableManager variables.ScopedVariableCMCSManager + scopedVariableManager variables.ScopedVariableCMCSManager variableSnapshotHistoryService variables.VariableSnapshotHistoryService pluginInputVariableParser PluginInputVariableParser @@ -314,8 +314,8 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi k8sCommonService: k8sCommonService, pipelineStageService: pipelineStageService, scopedVariableManager: scopedVariableManager, - globalPluginService: globalPluginService, - pluginInputVariableParser: pluginInputVariableParser, + globalPluginService: globalPluginService, + pluginInputVariableParser: pluginInputVariableParser, deploymentTemplateHistoryService: deploymentTemplateHistoryService, configMapHistoryService: configMapHistoryService, @@ -743,6 +743,9 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) return err } + if customTag.Enabled == false { + return fmt.Errorf("skopeo plugin configured but custom tag is disabled") + } var customTagId int if customTag != nil { customTagId = customTag.Id @@ -898,6 +901,9 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) return err } + if customTag.Enabled == false { + return fmt.Errorf("skopeo plugin configured but custom tag is disabled") + } var 
customTagId int if customTag == nil { customTagId = customTag.Id From 5075851d13dd8779e2e01a3faee2fce9ec435c2f Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Sun, 5 Nov 2023 19:17:14 +0530 Subject: [PATCH 107/143] adding data source --- internal/sql/repository/CiArtifactRepository.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 542436f892..76c3a76db7 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -572,7 +572,7 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV2(cdPipelineId int } func GetCiMaterialInfo(materialInfo string, source string) ([]CiMaterialInfo, error) { - if source != "GOCD" && source != "CI-RUNNER" && source != "EXTERNAL" { + if source != "GOCD" && source != "CI-RUNNER" && source != "EXTERNAL" && source != "post_ci" { return nil, fmt.Errorf("datasource: %s not supported", source) } var ciMaterials []CiMaterialInfo From d495f9ac7b1bc11dbf3e3bcee1cacebf71e1f957 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 6 Nov 2023 02:21:55 +0530 Subject: [PATCH 108/143] refactoring skopeo parsing --- pkg/pipeline/CiService.go | 28 ++++- pkg/pipeline/CustomTagService.go | 9 +- pkg/pipeline/WorkflowDagExecutor.go | 42 +++++-- pkg/pipeline/pipelineStageVariableParser.go | 128 +++++++++++++++++++- 4 files changed, 188 insertions(+), 19 deletions(-) diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 7b3dc9d2d9..bcae6c2a65 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -490,14 +490,15 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imageReservationIds, err := impl.GetWorkflowRequestVariablesForSkopeoPlugin( preCiSteps, postCiSteps, dockerImageTag, customTag.Id, fmt.Sprintf(bean2.ImagePathPattern, pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository, dockerImageTag), pipeline.CiTemplate.DockerRegistry.Id) - savedWf.ImagePathReservationIds = append(savedWf.ImagePathReservationIds, imageReservationIds...) - // skopeo plugin logic ends if err != nil { impl.Logger.Errorw("error in getting env variables for skopeo plugin") return nil, err } + savedWf.ImagePathReservationIds = append(savedWf.ImagePathReservationIds, imageReservationIds...) + // skopeo plugin logic ends + if ciWorkflowConfig.CiCacheBucket == "" { ciWorkflowConfig.CiCacheBucket = impl.config.DefaultCacheBucket } @@ -740,7 +741,7 @@ func (impl *CiServiceImpl) GetWorkflowRequestVariablesForSkopeoPlugin(preCiSteps for _, step := range preCiSteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, err = impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, customTagId, buildImagePath, buildImagedockerRegistryId) + registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, customTag, buildImagePath, buildImagedockerRegistryId) if err != nil { impl.Logger.Errorw("error in parsing skopeo input variable", "err", err) return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, err @@ -751,7 +752,7 @@ func (impl *CiServiceImpl) GetWorkflowRequestVariablesForSkopeoPlugin(preCiSteps for _, step := range postCiSteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for 
Skopeo plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, err = impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, customTag, customTagId, buildImagePath, buildImagedockerRegistryId) + registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, customTag, buildImagePath, buildImagedockerRegistryId) if err != nil { impl.Logger.Errorw("error in parsing skopeo input variable", "err", err) return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, err @@ -759,9 +760,28 @@ func (impl *CiServiceImpl) GetWorkflowRequestVariablesForSkopeoPlugin(preCiSteps pluginArtifactStage = repository5.POST_CI } } + imagePathReservationIds, err = impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) + if err != nil { + return nil, nil, pluginArtifactStage, imagePathReservationIds, nil + } return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, nil } +func (impl *CiServiceImpl) ReserveImagesGeneratedAtPlugin(customTagId int, registryImageMap map[string][]string) ([]int, error) { + var imagePathReservationIds []int + for _, images := range registryImageMap { + for _, image := range images { + imagePathReservationData, err := impl.customTagService.ReserveImagePath(image, customTagId) + if err != nil { + impl.Logger.Errorw("Error in marking custom tag reserved", "err", err) + return imagePathReservationIds, err + } + imagePathReservationIds = append(imagePathReservationIds, imagePathReservationData.Id) + } + } + return imagePathReservationIds, nil +} + func buildCiStepsDataFromDockerBuildScripts(dockerBuildScripts []*bean.CiScript) []*bean2.StepObject { //before plugin support, few variables were set as env vars in ci-runner //these variables are now moved to 
global vars in plugin steps, but to avoid error in old scripts adding those variables in payload diff --git a/pkg/pipeline/CustomTagService.go b/pkg/pipeline/CustomTagService.go index 4d50f07d99..18fd979973 100644 --- a/pkg/pipeline/CustomTagService.go +++ b/pkg/pipeline/CustomTagService.go @@ -189,12 +189,15 @@ func (impl *CustomTagServiceImpl) GetCustomTag(entityKey int, entityValue string impl.Logger.Errorw("Error in fetching custom tag", "err", err) return customTagData, "", err } - - tag, err := validateAndConstructTag(customTagData) + var dockerTag string + if customTagData != nil && len(customTagData.TagPattern) == 0 { + return customTagData, dockerTag, nil + } + dockerTag, err = validateAndConstructTag(customTagData) if err != nil { return nil, "", err } - return customTagData, tag, nil + return customTagData, dockerTag, nil } diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index f77d9c6e5d..2a4fdda568 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -743,18 +743,21 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) return err } - if customTag.Enabled == false { - return fmt.Errorf("skopeo plugin configured but custom tag is disabled") - } var customTagId int - if customTag != nil { + if customTag != nil && customTagId > 0 { customTagId = customTag.Id + } else { + customTagId = -1 } - registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, dockerImageTag, customTagId, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, dockerImageTag, 
cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err } + imagePathReservationIds, err := impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) + if err != nil { + return err + } runner.ImagePathReservationIds = imagePathReservationIds err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) if err != nil { @@ -901,18 +904,21 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) return err } - if customTag.Enabled == false { - return fmt.Errorf("skopeo plugin configured but custom tag is disabled") - } var customTagId int - if customTag == nil { + if customTag != nil { customTagId = customTag.Id + } else { + customTagId = -1 } - registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, err := impl.pluginInputVariableParser.ParseSkopeoPluginInputVariables(step.InputVars, dockerImageTag, customTagId, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, dockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err } + imagePathReservationIds, err := impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) + if err != nil { + return err + } pluginImagePathReservationIds = imagePathReservationIds cdStageWorkflowRequest.RegistryDestinationImageMap = registryDestinationImageMap cdStageWorkflowRequest.RegistryCredentialMap = registryCredentialMap @@ -951,6 +957,22 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor } 
return nil } + +func (impl *WorkflowDagExecutorImpl) ReserveImagesGeneratedAtPlugin(customTagId int, registryImageMap map[string][]string) ([]int, error) { + var imagePathReservationIds []int + for _, images := range registryImageMap { + for _, image := range images { + imagePathReservationData, err := impl.customTagService.ReserveImagePath(image, customTagId) + if err != nil { + impl.logger.Errorw("Error in marking custom tag reserved", "err", err) + return imagePathReservationIds, err + } + imagePathReservationIds = append(imagePathReservationIds, imagePathReservationData.Id) + } + } + return imagePathReservationIds, nil +} + func (impl *WorkflowDagExecutorImpl) buildArtifactLocationForS3(cdWorkflowConfig *pipelineConfig.CdWorkflowConfig, cdWf *pipelineConfig.CdWorkflow, runner *pipelineConfig.CdWorkflowRunner) (string, string, string) { cdArtifactLocationFormat := cdWorkflowConfig.CdArtifactLocationFormat if cdArtifactLocationFormat == "" { diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index 19d066cf42..42f50b3272 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -18,12 +18,14 @@ const ( ) const ( - DESTINATION_INFO SkopeoInputVariable = "DESTINATION_INFO" - SOURCE_INFO SkopeoInputVariable = "SOURCE_INFO" + DESTINATION_INFO SkopeoInputVariable = "DESTINATION_INFO" + SOURCE_INFO SkopeoInputVariable = "SOURCE_INFO" + SOURCE_REGISTRY_CREDENTIALS_KEY = "SOURCE_REGISTRY_CREDENTIAL" ) type PluginInputVariableParser interface { ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, customTag string, customTagId int, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, []int, error) + HandleSkopeoPluginInputVariable(inputVariables []*bean.VariableObject, dockerImageTag string, pluginTriggerImage string, buildConfigurationRegistry string) (registryDestinationImageMap 
map[string][]string, registryCredentialsMap map[string]plugin.RegistryCredentials, err error) } type PluginInputVariableParserImpl struct { @@ -73,6 +75,45 @@ func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(input return registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, nil } +func (impl *PluginInputVariableParserImpl) HandleSkopeoPluginInputVariable(inputVariables []*bean.VariableObject, + dockerImageTag string, + pluginTriggerImage string, + buildConfigurationRegistry string) (registryDestinationImageMap map[string][]string, registryCredentials map[string]plugin.RegistryCredentials, err error) { + + var DestinationInfo, SourceRegistry, SourceImage string + for _, ipVariable := range inputVariables { + if ipVariable.Name == DESTINATION_INFO { + DestinationInfo = ipVariable.Value + } else if ipVariable.Name == SOURCE_INFO { + if len(pluginTriggerImage) == 0 { + if len(ipVariable.Value) == 0 { + impl.logger.Errorw("No image provided in source or during trigger time") + return nil, nil, errors.New("no image provided in source or during trigger time") + } + SourceInfo := ipVariable.Value + SourceInfoSplit := strings.Split(SourceInfo, "|") + SourceImage = SourceInfoSplit[len(SourceInfoSplit)-1] + SourceRegistry = SourceInfoSplit[0] + } else { + SourceImage = pluginTriggerImage + SourceRegistry = buildConfigurationRegistry + if len(dockerImageTag) == 0 { + sourceSplit := strings.Split(SourceImage, ":") + dockerImageTag = sourceSplit[len(sourceSplit)-1] + } + } + } + } + registryRepoMapping := impl.getRegistryRepoMapping(DestinationInfo) + registryCredentials, err = impl.getRegistryDetails(registryRepoMapping, SourceRegistry) + if err != nil { + return nil, nil, err + } + registryDestinationImageMap = impl.getRegistryDestinationImageMapping(registryRepoMapping, dockerImageTag, registryCredentials) + + return registryDestinationImageMap, registryCredentials, nil +} + func (impl *PluginInputVariableParserImpl) 
getRegistryDetailsAndDestinationImagePathForSkopeo(dockerImageTag string, tagId int, sourceImage string, sourceRegistry string, destinationInfo string) (registryDestinationImageMap map[string][]string, registryCredentialsMap map[string]plugin.RegistryCredentials, imagePathReservationIds []int, err error) { registryDestinationImageMap = make(map[string][]string) registryCredentialsMap = make(map[string]plugin.RegistryCredentials) @@ -138,3 +179,86 @@ func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImage //adding source registry details return registryDestinationImageMap, registryCredentialsMap, imagePathReservationIds, nil } + +func (impl *PluginInputVariableParserImpl) getRegistryRepoMapping(destinationInfo string) map[string][]string { + /* + creating map with registry as key and list of repositories in that registry where we need to copy image + destinationInfo format (each registry detail is separated by new line) : + | + | + */ + destinationRegistryRepositoryMap := make(map[string][]string) + destinationRegistryRepoDetails := strings.Split(destinationInfo, "\n") + for _, detail := range destinationRegistryRepoDetails { + registryRepoSplit := strings.Split(detail, "|") + registryName := strings.Trim(registryRepoSplit[0], " ") + repositoryValuesSplit := strings.Split(registryRepoSplit[1], ",") + var repositories []string + for _, repositoryName := range repositoryValuesSplit { + repositoryName = strings.Trim(repositoryName, " ") + repositories = append(repositories, repositoryName) + } + destinationRegistryRepositoryMap[registryName] = repositories + } + return destinationRegistryRepositoryMap +} + +func (impl *PluginInputVariableParserImpl) getRegistryDetails(destinationRegistryRepositoryMap map[string][]string, sourceRegistry string) (map[string]plugin.RegistryCredentials, error) { + registryCredentialsMap := make(map[string]plugin.RegistryCredentials) + //saving source registry credentials + sourceRegistryCredentials, err := 
impl.getPluginRegistryCredentialsByRegistryName(sourceRegistry) + if err != nil { + return nil, err + } + registryCredentialsMap[SOURCE_REGISTRY_CREDENTIALS_KEY] = *sourceRegistryCredentials + + // saving destination registry credentials; destinationRegistryRepositoryMap -> map[registryName]= [, ] + for registry, _ := range destinationRegistryRepositoryMap { + destinationRegistryCredential, err := impl.getPluginRegistryCredentialsByRegistryName(registry) + if err != nil { + return nil, err + } + registryCredentialsMap[registry] = *destinationRegistryCredential + } + return registryCredentialsMap, nil +} + +func (impl *PluginInputVariableParserImpl) getPluginRegistryCredentialsByRegistryName(registryName string) (*plugin.RegistryCredentials, error) { + registryCredentials, err := impl.dockerRegistryConfig.FetchOneDockerAccount(registryName) + if err != nil { + impl.logger.Errorw("error in fetching registry details by registry name", "err", err) + if err == pg.ErrNoRows { + return nil, fmt.Errorf("invalid registry name: registry details not found in global container registries") + } + return nil, err + } + return &plugin.RegistryCredentials{ + RegistryType: string(registryCredentials.RegistryType), + RegistryURL: registryCredentials.RegistryURL, + Username: registryCredentials.Username, + Password: registryCredentials.Password, + AWSRegion: registryCredentials.AWSRegion, + AWSSecretAccessKey: registryCredentials.AWSSecretAccessKey, + AWSAccessKeyId: registryCredentials.AWSAccessKeyId, + }, nil +} + +func (impl *PluginInputVariableParserImpl) getRegistryDestinationImageMapping( + registryRepoMapping map[string][]string, + dockerImageTag string, + registryCredentials map[string]plugin.RegistryCredentials) map[string][]string { + + // creating map with registry as key and list of destination images in that registry + registryDestinationImageMapping := make(map[string][]string) + for registry, destinationRepositories := range registryRepoMapping { + registryCredential := 
registryCredentials[registry] + var destinationImages []string + for _, repo := range destinationRepositories { + destinationImage := fmt.Sprintf("%s/%s:%s", registryCredential.RegistryURL, repo, dockerImageTag) + destinationImages = append(destinationImages, destinationImage) + } + registryDestinationImageMapping[registry] = destinationImages + } + + return registryDestinationImageMapping +} From 9c0063c0a21fabfa7473654e494f495040205582 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 6 Nov 2023 04:10:49 +0530 Subject: [PATCH 109/143] sql script modified plus registry credentials --- .../DockerRegistryIpsConfigService.go | 45 ++++--- pkg/pipeline/CustomTagService.go | 3 +- pkg/pipeline/WorkflowDagExecutor.go | 32 +++-- pkg/pipeline/pipelineStageVariableParser.go | 116 ++---------------- scripts/sql/186_ci_artifact_refactor.up.sql | 4 +- 5 files changed, 67 insertions(+), 133 deletions(-) diff --git a/pkg/dockerRegistry/DockerRegistryIpsConfigService.go b/pkg/dockerRegistry/DockerRegistryIpsConfigService.go index 21aab227c1..5c788e9769 100644 --- a/pkg/dockerRegistry/DockerRegistryIpsConfigService.go +++ b/pkg/dockerRegistry/DockerRegistryIpsConfigService.go @@ -20,6 +20,7 @@ package dockerRegistry import ( "encoding/json" "github.com/devtron-labs/common-lib/utils/k8s" + repository3 "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/pkg/cluster" @@ -34,7 +35,7 @@ import ( type DockerRegistryIpsConfigService interface { IsImagePullSecretAccessProvided(dockerRegistryId string, clusterId int, isVirtualEnv bool) (bool, error) - HandleImagePullSecretOnApplicationDeployment(environment *repository2.Environment, ciPipelineId int, valuesFileContent []byte) ([]byte, error) + 
HandleImagePullSecretOnApplicationDeployment(environment *repository2.Environment, artifact *repository3.CiArtifact, ciPipelineId int, valuesFileContent []byte) ([]byte, error) } type DockerRegistryIpsConfigServiceImpl struct { @@ -76,7 +77,7 @@ func (impl DockerRegistryIpsConfigServiceImpl) IsImagePullSecretAccessProvided(d return isAccessProvided, nil } -func (impl DockerRegistryIpsConfigServiceImpl) HandleImagePullSecretOnApplicationDeployment(environment *repository2.Environment, ciPipelineId int, valuesFileContent []byte) ([]byte, error) { +func (impl DockerRegistryIpsConfigServiceImpl) HandleImagePullSecretOnApplicationDeployment(environment *repository2.Environment, artifact *repository3.CiArtifact, ciPipelineId int, valuesFileContent []byte) ([]byte, error) { clusterId := environment.ClusterId impl.logger.Infow("handling ips if access given", "ciPipelineId", ciPipelineId, "clusterId", clusterId) @@ -85,7 +86,7 @@ func (impl DockerRegistryIpsConfigServiceImpl) HandleImagePullSecretOnApplicatio return valuesFileContent, nil } - dockerRegistryId, err := impl.getDockerRegistryIdForCiPipeline(ciPipelineId) + dockerRegistryId, err := impl.getDockerRegistryIdForCiPipeline(ciPipelineId, artifact) if err != nil { impl.logger.Errorw("error in getting docker registry", "dockerRegistryId", dockerRegistryId, "error", err) return valuesFileContent, err @@ -138,7 +139,7 @@ func (impl DockerRegistryIpsConfigServiceImpl) HandleImagePullSecretOnApplicatio return updatedValuesFileContent, nil } -func (impl DockerRegistryIpsConfigServiceImpl) getDockerRegistryIdForCiPipeline(ciPipelineId int) (*string, error) { +func (impl DockerRegistryIpsConfigServiceImpl) getDockerRegistryIdForCiPipeline(ciPipelineId int, artifact *repository3.CiArtifact) (*string, error) { ciPipeline, err := impl.ciPipelineRepository.FindById(ciPipelineId) if err != nil { impl.logger.Errorw("error in fetching ciPipeline", "ciPipelineId", ciPipelineId, "error", err) @@ -154,23 +155,31 @@ func (impl 
DockerRegistryIpsConfigServiceImpl) getDockerRegistryIdForCiPipeline( impl.logger.Warn("returning as ciPipeline.CiTemplate is found nil") return nil, nil } + var dockerRegistryId string + if artifact.DataSource == repository3.POST_CI || artifact.DataSource == repository3.PRE_CD || artifact.DataSource == repository3.POST_CD { + // if image is generated by plugin at these stages + if artifact.CredentialsSourceType == repository3.GLOBAL_CONTAINER_REGISTRY { + dockerRegistryId = artifact.CredentialSourceValue + } + } else { + // if image is created by ci build + dockerRegistryId = *ciPipeline.CiTemplate.DockerRegistryId + if len(dockerRegistryId) == 0 { + impl.logger.Warn("returning as dockerRegistryId is found empty") + return nil, nil + } - dockerRegistryId := ciPipeline.CiTemplate.DockerRegistryId - if dockerRegistryId != nil && len(*dockerRegistryId) == 0 { - impl.logger.Warn("returning as dockerRegistryId is found empty") - return nil, nil - } - - if ciPipeline.IsDockerConfigOverridden { - //set dockerRegistryId value with the DockerRegistryId of the overridden dockerRegistry - ciTemplateOverride, err := impl.ciTemplateOverrideRepository.FindByCiPipelineId(ciPipelineId) - if err != nil { - impl.logger.Errorw("error in getting ciTemplateOverride by ciPipelineId", "ciPipelineId", ciPipelineId, "error", err) - return nil, err + if ciPipeline.IsDockerConfigOverridden { + //set dockerRegistryId value with the DockerRegistryId of the overridden dockerRegistry + ciTemplateOverride, err := impl.ciTemplateOverrideRepository.FindByCiPipelineId(ciPipelineId) + if err != nil { + impl.logger.Errorw("error in getting ciTemplateOverride by ciPipelineId", "ciPipelineId", ciPipelineId, "error", err) + return nil, err + } + dockerRegistryId = ciTemplateOverride.DockerRegistryId } - dockerRegistryId = &ciTemplateOverride.DockerRegistryId } - return dockerRegistryId, nil + return &dockerRegistryId, nil } func (impl DockerRegistryIpsConfigServiceImpl) 
createOrUpdateDockerRegistryImagePullSecret(clusterId int, namespace string, ipsName string, dockerRegistryBean *repository.DockerArtifactStore) error { impl.logger.Infow("creating/updating ips", "ipsName", ipsName, "clusterId", clusterId) diff --git a/pkg/pipeline/CustomTagService.go b/pkg/pipeline/CustomTagService.go index 18fd979973..b4efa8a5c7 100644 --- a/pkg/pipeline/CustomTagService.go +++ b/pkg/pipeline/CustomTagService.go @@ -1,6 +1,7 @@ package pipeline import ( + "errors" "fmt" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository" @@ -212,7 +213,7 @@ func (impl *CustomTagServiceImpl) ReserveImagePath(imagePath string, customTagId return nil, err } if len(imagePathReservations) > 0 { - return nil, nil + return nil, errors.New("cannot copy image using skopeo, image with similar name already exist") } imagePathReservation := &repository.ImagePathReservation{ ImagePath: imagePath, diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 2a4fdda568..b254584c6c 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -744,12 +744,20 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * return err } var customTagId int - if customTag != nil && customTagId > 0 { + if customTag != nil && customTag.Id > 0 { customTagId = customTag.Id } else { customTagId = -1 } - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, dockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) + var sourceDockerRegistryId string + if artifact.DataSource == repository.PRE_CD || artifact.DataSource == repository.POST_CD || artifact.DataSource == repository.POST_CI { + if artifact.CredentialsSourceType == repository.GLOBAL_CONTAINER_REGISTRY { + sourceDockerRegistryId = 
artifact.CredentialSourceValue + } + } else { + sourceDockerRegistryId = cdStageWorkflowRequest.DockerRegistryId + } + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, dockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err @@ -905,12 +913,20 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor return err } var customTagId int - if customTag != nil { + if customTag != nil && customTag.Id > 0 { customTagId = customTag.Id } else { customTagId = -1 } - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, dockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, cdStageWorkflowRequest.DockerRegistryId) + var sourceDockerRegistryId string + if cdWf.CiArtifact.DataSource == repository.PRE_CD || cdWf.CiArtifact.DataSource == repository.POST_CD || cdWf.CiArtifact.DataSource == repository.POST_CI { + if cdWf.CiArtifact.CredentialsSourceType == repository.GLOBAL_CONTAINER_REGISTRY { + sourceDockerRegistryId = cdWf.CiArtifact.CredentialSourceValue + } + } else { + sourceDockerRegistryId = cdStageWorkflowRequest.DockerRegistryId + } + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, dockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err @@ -967,7 +983,9 @@ func (impl *WorkflowDagExecutorImpl) ReserveImagesGeneratedAtPlugin(customTagId impl.logger.Errorw("Error in marking custom tag reserved", "err", err) return imagePathReservationIds, err } - imagePathReservationIds = append(imagePathReservationIds, imagePathReservationData.Id) + if 
imagePathReservationData != nil { + imagePathReservationIds = append(imagePathReservationIds, imagePathReservationData.Id) + } } } return imagePathReservationIds, nil @@ -2871,7 +2889,7 @@ func (impl *WorkflowDagExecutorImpl) GetValuesOverrideForTrigger(overrideRequest _, span = otel.Tracer("orchestrator").Start(ctx, "dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment") // handle image pull secret if access given - mergedValues, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(envOverride.Environment, pipeline.CiPipelineId, mergedValues) + mergedValues, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(envOverride.Environment, artifact, pipeline.CiPipelineId, mergedValues) valuesOverrideResponse.MergedValues = string(mergedValues) span.End() if err != nil { @@ -3723,7 +3741,7 @@ func (impl *WorkflowDagExecutorImpl) mergeAndSave(envOverride *chartConfig.EnvCo _, span := otel.Tracer("orchestrator").Start(ctx, "dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment") // handle image pull secret if access given - merged, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(envOverride.Environment, pipeline.CiPipelineId, merged) + merged, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(envOverride.Environment, artifact, pipeline.CiPipelineId, merged) span.End() if err != nil { return 0, 0, "", err diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index 42f50b3272..52faf748d6 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -24,7 +24,6 @@ const ( ) type PluginInputVariableParser interface { - ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, customTag string, customTagId int, pluginTriggerImage string, buildConfigurationRegistry string) 
(map[string][]string, map[string]plugin.RegistryCredentials, []int, error) HandleSkopeoPluginInputVariable(inputVariables []*bean.VariableObject, dockerImageTag string, pluginTriggerImage string, buildConfigurationRegistry string) (registryDestinationImageMap map[string][]string, registryCredentialsMap map[string]plugin.RegistryCredentials, err error) } @@ -46,35 +45,6 @@ func NewPluginInputVariableParserImpl( } } -func (impl *PluginInputVariableParserImpl) ParseSkopeoPluginInputVariables(inputVariables []*bean.VariableObject, dockerImageTag string, customTagId int, pluginTriggerImage string, buildConfigurationRegistry string) (map[string][]string, map[string]plugin.RegistryCredentials, []int, error) { - var DestinationInfo, SourceRegistry, SourceImage string - for _, ipVariable := range inputVariables { - if ipVariable.Name == DESTINATION_INFO { - DestinationInfo = ipVariable.Value - } else if ipVariable.Name == SOURCE_INFO { - if len(pluginTriggerImage) == 0 { - if len(ipVariable.Value) == 0 { - impl.logger.Errorw("No image provided in source or during trigger time") - return nil, nil, nil, errors.New("no image provided in source or during trigger time") - } - SourceInfo := ipVariable.Value - SourceInfoSplit := strings.Split(SourceInfo, "|") - SourceImage = SourceInfoSplit[len(SourceInfoSplit)-1] - SourceRegistry = SourceInfoSplit[0] - } else { - SourceImage = pluginTriggerImage - SourceRegistry = buildConfigurationRegistry - } - } - } - registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, err := impl.getRegistryDetailsAndDestinationImagePathForSkopeo(dockerImageTag, customTagId, SourceImage, SourceRegistry, DestinationInfo) - if err != nil { - impl.logger.Errorw("Error in parsing skopeo input variables") - return nil, nil, nil, err - } - return registryDestinationImageMap, registryCredentialMap, imagePathReservationIds, nil -} - func (impl *PluginInputVariableParserImpl) HandleSkopeoPluginInputVariable(inputVariables 
[]*bean.VariableObject, dockerImageTag string, pluginTriggerImage string, @@ -85,25 +55,24 @@ func (impl *PluginInputVariableParserImpl) HandleSkopeoPluginInputVariable(input if ipVariable.Name == DESTINATION_INFO { DestinationInfo = ipVariable.Value } else if ipVariable.Name == SOURCE_INFO { - if len(pluginTriggerImage) == 0 { - if len(ipVariable.Value) == 0 { - impl.logger.Errorw("No image provided in source or during trigger time") - return nil, nil, errors.New("no image provided in source or during trigger time") - } + if len(ipVariable.Value) > 0 { SourceInfo := ipVariable.Value SourceInfoSplit := strings.Split(SourceInfo, "|") SourceImage = SourceInfoSplit[len(SourceInfoSplit)-1] SourceRegistry = SourceInfoSplit[0] - } else { + } else if len(pluginTriggerImage) > 0 { SourceImage = pluginTriggerImage SourceRegistry = buildConfigurationRegistry - if len(dockerImageTag) == 0 { - sourceSplit := strings.Split(SourceImage, ":") - dockerImageTag = sourceSplit[len(sourceSplit)-1] - } + } else { + impl.logger.Errorw("No image provided in source or during trigger time") + return nil, nil, errors.New("no image provided in source or during trigger time") } } } + if len(dockerImageTag) == 0 { + sourceSplit := strings.Split(SourceImage, ":") + dockerImageTag = sourceSplit[len(sourceSplit)-1] + } registryRepoMapping := impl.getRegistryRepoMapping(DestinationInfo) registryCredentials, err = impl.getRegistryDetails(registryRepoMapping, SourceRegistry) if err != nil { @@ -114,72 +83,6 @@ func (impl *PluginInputVariableParserImpl) HandleSkopeoPluginInputVariable(input return registryDestinationImageMap, registryCredentials, nil } -func (impl *PluginInputVariableParserImpl) getRegistryDetailsAndDestinationImagePathForSkopeo(dockerImageTag string, tagId int, sourceImage string, sourceRegistry string, destinationInfo string) (registryDestinationImageMap map[string][]string, registryCredentialsMap map[string]plugin.RegistryCredentials, imagePathReservationIds []int, err error) { - 
registryDestinationImageMap = make(map[string][]string) - registryCredentialsMap = make(map[string]plugin.RegistryCredentials) - imagePathReservationIds = make([]int, 0) - if len(dockerImageTag) == 0 { - sourceSplit := strings.Split(sourceImage, ":") - dockerImageTag = sourceSplit[len(sourceSplit)-1] - } - //saving source registry credentials - registryCredentials, err := impl.dockerRegistryConfig.FetchOneDockerAccount(sourceRegistry) - if err != nil { - impl.logger.Errorw("error in fetching registry details by registry name", "err", err) - return registryDestinationImageMap, registryCredentialsMap, imagePathReservationIds, err - } - registryCredentialsMap["SOURCE_REGISTRY_CREDENTIAL"] = plugin.RegistryCredentials{ - RegistryType: string(registryCredentials.RegistryType), - RegistryURL: registryCredentials.RegistryURL, - Username: registryCredentials.Username, - Password: registryCredentials.Password, - AWSRegion: registryCredentials.AWSRegion, - AWSSecretAccessKey: registryCredentials.AWSSecretAccessKey, - AWSAccessKeyId: registryCredentials.AWSAccessKeyId, - } - - destinationRegistryRepoDetails := strings.Split(destinationInfo, "\n") - for _, detail := range destinationRegistryRepoDetails { - registryRepoSplit := strings.Split(detail, "|") - registryName := strings.Trim(registryRepoSplit[0], " ") - registryCredentials, err := impl.dockerRegistryConfig.FetchOneDockerAccount(registryName) - if err != nil { - impl.logger.Errorw("error in fetching registry details by registry name", "err", err) - if err == pg.ErrNoRows { - return registryDestinationImageMap, registryCredentialsMap, imagePathReservationIds, fmt.Errorf("invalid registry name: registry details not found in global container registries") - } - return registryDestinationImageMap, registryCredentialsMap, imagePathReservationIds, err - } - var destinationImages []string - destinationRepositoryValues := registryRepoSplit[1] - repositoryValuesSplit := strings.Split(destinationRepositoryValues, ",") - - for _, 
repositoryName := range repositoryValuesSplit { - repositoryName = strings.Trim(repositoryName, " ") - destinationImage := fmt.Sprintf("%s/%s:%s", registryCredentials.RegistryURL, repositoryName, dockerImageTag) - destinationImages = append(destinationImages, destinationImage) - imagePathReservationData, err := impl.customTagService.ReserveImagePath(destinationImage, tagId) - if err != nil { - impl.logger.Errorw("Error in marking custom tag reserved", "err", err) - return registryDestinationImageMap, registryCredentialsMap, imagePathReservationIds, err - } - imagePathReservationIds = append(imagePathReservationIds, imagePathReservationData.Id) - } - registryDestinationImageMap[registryName] = destinationImages - registryCredentialsMap[registryName] = plugin.RegistryCredentials{ - RegistryType: string(registryCredentials.RegistryType), - RegistryURL: registryCredentials.RegistryURL, - Username: registryCredentials.Username, - Password: registryCredentials.Password, - AWSRegion: registryCredentials.AWSRegion, - AWSSecretAccessKey: registryCredentials.AWSSecretAccessKey, - AWSAccessKeyId: registryCredentials.AWSAccessKeyId, - } - } - //adding source registry details - return registryDestinationImageMap, registryCredentialsMap, imagePathReservationIds, nil -} - func (impl *PluginInputVariableParserImpl) getRegistryRepoMapping(destinationInfo string) map[string][]string { /* creating map with registry as key and list of repositories in that registry where we need to copy image @@ -206,6 +109,7 @@ func (impl *PluginInputVariableParserImpl) getRegistryRepoMapping(destinationInf func (impl *PluginInputVariableParserImpl) getRegistryDetails(destinationRegistryRepositoryMap map[string][]string, sourceRegistry string) (map[string]plugin.RegistryCredentials, error) { registryCredentialsMap := make(map[string]plugin.RegistryCredentials) //saving source registry credentials + sourceRegistry = strings.Trim(sourceRegistry, " ") sourceRegistryCredentials, err := 
impl.getPluginRegistryCredentialsByRegistryName(sourceRegistry) if err != nil { return nil, err diff --git a/scripts/sql/186_ci_artifact_refactor.up.sql b/scripts/sql/186_ci_artifact_refactor.up.sql index 455836e697..3153a038a4 100644 --- a/scripts/sql/186_ci_artifact_refactor.up.sql +++ b/scripts/sql/186_ci_artifact_refactor.up.sql @@ -6,4 +6,6 @@ ALTER TABLE ci_workflow ADD COLUMN image_path_reservation_ids integer[]; UPDATE ci_workflow set image_path_reservation_ids=ARRAY["image_path_reservation_id"] where image_path_reservation_id is not NULL; -ALTER TABLE cd_workflow_runner ADD COLUMN image_path_reservation_ids integer[]; \ No newline at end of file +ALTER TABLE cd_workflow_runner ADD COLUMN image_path_reservation_ids integer[]; + +ALTER TABLE image_path_reservation DROP CONSTRAINT image_path_reservation_custom_tag_id_fkey; \ No newline at end of file From 6545b2bddb093096da069556d1d2607f30b7e6ec Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 6 Nov 2023 12:18:29 +0530 Subject: [PATCH 110/143] removing source info handling and adding comments --- pkg/pipeline/CiService.go | 10 ++----- pkg/pipeline/pipelineStageVariableParser.go | 32 +++++++++------------ scripts/sql/185_skopeo_plugin.up.sql | 5 +--- 3 files changed, 16 insertions(+), 31 deletions(-) diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index bcae6c2a65..da1b218d02 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -490,7 +490,6 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imageReservationIds, err := impl.GetWorkflowRequestVariablesForSkopeoPlugin( preCiSteps, postCiSteps, dockerImageTag, customTag.Id, fmt.Sprintf(bean2.ImagePathPattern, pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository, dockerImageTag), pipeline.CiTemplate.DockerRegistry.Id) - if err != nil { impl.Logger.Errorw("error in getting env variables for skopeo plugin") return nil, err @@ -741,12 +740,7 @@ func (impl *CiServiceImpl) GetWorkflowRequestVariablesForSkopeoPlugin(preCiSteps for _, step := range preCiSteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, customTag, buildImagePath, buildImagedockerRegistryId) - if err != nil { - impl.Logger.Errorw("error in parsing skopeo input variable", "err", err) - return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, err - } - pluginArtifactStage = repository5.PRE_CI + return nil, nil, pluginArtifactStage, nil, errors.New("skopeo plugin not allowed in pre-ci step, please remove it and try again") } } for _, step := range postCiSteps { @@ -762,7 +756,7 @@ func (impl *CiServiceImpl) GetWorkflowRequestVariablesForSkopeoPlugin(preCiSteps } imagePathReservationIds, err = impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) if err != nil { - return nil, nil, pluginArtifactStage, imagePathReservationIds, nil + return nil, nil, pluginArtifactStage, imagePathReservationIds, err } return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, nil } diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index 
52faf748d6..73ebcc36f4 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -24,7 +24,7 @@ const ( ) type PluginInputVariableParser interface { - HandleSkopeoPluginInputVariable(inputVariables []*bean.VariableObject, dockerImageTag string, pluginTriggerImage string, buildConfigurationRegistry string) (registryDestinationImageMap map[string][]string, registryCredentialsMap map[string]plugin.RegistryCredentials, err error) + HandleSkopeoPluginInputVariable(inputVariables []*bean.VariableObject, dockerImageTag string, pluginTriggerImage string, sourceImageDockerRegistry string) (registryDestinationImageMap map[string][]string, registryCredentials map[string]plugin.RegistryCredentials, err error) } type PluginInputVariableParserImpl struct { @@ -48,33 +48,27 @@ func NewPluginInputVariableParserImpl( func (impl *PluginInputVariableParserImpl) HandleSkopeoPluginInputVariable(inputVariables []*bean.VariableObject, dockerImageTag string, pluginTriggerImage string, - buildConfigurationRegistry string) (registryDestinationImageMap map[string][]string, registryCredentials map[string]plugin.RegistryCredentials, err error) { + sourceImageDockerRegistry string) (registryDestinationImageMap map[string][]string, registryCredentials map[string]plugin.RegistryCredentials, err error) { - var DestinationInfo, SourceRegistry, SourceImage string + var DestinationInfo string for _, ipVariable := range inputVariables { if ipVariable.Name == DESTINATION_INFO { DestinationInfo = ipVariable.Value - } else if ipVariable.Name == SOURCE_INFO { - if len(ipVariable.Value) > 0 { - SourceInfo := ipVariable.Value - SourceInfoSplit := strings.Split(SourceInfo, "|") - SourceImage = SourceInfoSplit[len(SourceInfoSplit)-1] - SourceRegistry = SourceInfoSplit[0] - } else if len(pluginTriggerImage) > 0 { - SourceImage = pluginTriggerImage - SourceRegistry = buildConfigurationRegistry - } else { - impl.logger.Errorw("No image provided in source or 
during trigger time") - return nil, nil, errors.New("no image provided in source or during trigger time") - } } } + + if len(pluginTriggerImage) == 0 { + return nil, nil, errors.New("no image provided during trigger time") + } + if len(dockerImageTag) == 0 { - sourceSplit := strings.Split(SourceImage, ":") - dockerImageTag = sourceSplit[len(sourceSplit)-1] + // case when custom tag is not configured - source image tag will be taken as docker image tag + pluginTriggerImageSplit := strings.Split(pluginTriggerImage, ":") + dockerImageTag = pluginTriggerImageSplit[len(pluginTriggerImageSplit)-1] } + registryRepoMapping := impl.getRegistryRepoMapping(DestinationInfo) - registryCredentials, err = impl.getRegistryDetails(registryRepoMapping, SourceRegistry) + registryCredentials, err = impl.getRegistryDetails(registryRepoMapping, sourceImageDockerRegistry) if err != nil { return nil, nil, err } diff --git a/scripts/sql/185_skopeo_plugin.up.sql b/scripts/sql/185_skopeo_plugin.up.sql index c3f3b13915..9ec2f303aa 100644 --- a/scripts/sql/185_skopeo_plugin.up.sql +++ b/scripts/sql/185_skopeo_plugin.up.sql @@ -11,14 +11,11 @@ INSERT INTO "plugin_stage_mapping" ("plugin_id","stage_type","created_on", "crea VALUES ((SELECT id FROM plugin_metadata WHERE name='Skopeo'),0,'now()', 1, 'now()', 1); INSERT INTO "plugin_pipeline_script" ("id","type","mount_directory_from_host","container_image_path","deleted","created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_pipeline_script'),'CONTAINER_IMAGE','t','quay.io/devtron/test:ec27cbd0-81-446','f','now()',1,'now()',1); +VALUES (nextval('id_seq_plugin_pipeline_script'),'CONTAINER_IMAGE','t','quay.io/devtron/test:9ab24450-81-909','f','now()',1,'now()',1); INSERT INTO "plugin_step" ("id", "plugin_id","name","description","index","step_type","script_id","deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM plugin_metadata WHERE 
name='Skopeo'),'Step 1','Step 1 - Copy container images','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1); -INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value", "variable_type", "value_type", "variable_step_index", "deleted", "created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'SOURCE_INFO','STRING','source image info. FORMAT:- registry| ',true,true,'INPUT','NEW',1 ,'f','now()', 1, 'now()', 1); - INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index", "deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'DESTINATION_INFO','STRING','Skopeo account username',true,true,'INPUT','NEW',1 ,'f','now()', 1, 'now()', 1); From 262792f6adefb9ab659cc9311def82fc29ba72cb Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 6 Nov 2023 13:08:33 +0530 Subject: [PATCH 111/143] new cd workflow in case of auto trigger and plugin generated image --- pkg/pipeline/WorkflowDagExecutor.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index b254584c6c..dad895f095 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -656,7 +656,8 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * } } var err error - if cdWf == nil { + if cdWf == nil || (cdWf != nil && cdWf.CiArtifactId != 
artifact.Id) { + // (cdWf != nil && cdWf.CiArtifactId != artifact.Id) -> for auto trigger case when cd is triggered with image generated at plugin (like skopeo) cdWf = &pipelineConfig.CdWorkflow{ CiArtifactId: artifact.Id, PipelineId: pipeline.Id, @@ -1623,6 +1624,9 @@ func (impl *WorkflowDagExecutorImpl) TriggerDeployment(cdWf *pipelineConfig.CdWo if err != nil { return err } + } else { + // + cdWf.CiArtifactId = artifact.Id } runner := &pipelineConfig.CdWorkflowRunner{ From b915cca1407d40ad66c78a24e60c67a2dec4f301 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 6 Nov 2023 15:01:36 +0530 Subject: [PATCH 112/143] idempotency in saving artifacts and deploy cd workflow fix --- .../sql/repository/CiArtifactRepository.go | 14 ++++++++ .../CiArtifactsListingQueryBuilder.go | 2 +- pkg/pipeline/PipelineBuilder.go | 2 +- pkg/pipeline/WorkflowDagExecutor.go | 35 ++++++++++++------- scripts/sql/185_skopeo_plugin.up.sql | 3 ++ 5 files changed, 42 insertions(+), 14 deletions(-) diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 76c3a76db7..4865b83aab 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -101,6 +101,7 @@ type CiArtifactRepository interface { GetArtifactsByParentCiWorkflowId(parentCiWorkflowId int) ([]string, error) FetchArtifactsByCdPipelineIdV2(listingFilterOptions bean.ArtifactsListFilterOptions) ([]CiArtifactWithExtraData, int, error) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CiArtifact, int, error) + GetArtifactsByDataSourceAndComponentId(dataSource string, componentId int) ([]CiArtifact, error) } type CiArtifactRepositoryImpl struct { @@ -715,3 +716,16 @@ func (impl CiArtifactRepositoryImpl) FetchArtifactsByCdPipelineIdV2(listingFilte } return wfrList, totalCount, nil } + +func (impl CiArtifactRepositoryImpl) GetArtifactsByDataSourceAndComponentId(dataSource string, componentId 
int) ([]CiArtifact, error) { + var ciArtifacts []CiArtifact + err := impl.dbConnection. + Model(&ciArtifacts). + Where(" data_source=? and component_id=? ", dataSource, componentId). + Select() + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting ci artifacts by data_source and component_id") + return ciArtifacts, err + } + return ciArtifacts, nil +} diff --git a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go index 99345c1cb3..dcab6ec953 100644 --- a/internal/sql/repository/CiArtifactsListingQueryBuilder.go +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -67,7 +67,7 @@ func BuildQueryForArtifactsForCdStage(listingFilterOptions bean.ArtifactsListFil totalCountQuery := "SELECT COUNT(DISTINCT ci_artifact.id) as total_count " + commonQuery selectQuery := fmt.Sprintf("SELECT DISTINCT(ci_artifact.id) , (%v) ", totalCountQuery) //GroupByQuery := " GROUP BY cia.id " - limitOffSetQuery := fmt.Sprintf(" LIMIT %v OFFSET %v", listingFilterOptions.Limit, listingFilterOptions.Offset) + limitOffSetQuery := fmt.Sprintf(" order by ci_artifact.id LIMIT %v OFFSET %v", listingFilterOptions.Limit, listingFilterOptions.Offset) //finalQuery := selectQuery + commonQuery + GroupByQuery + limitOffSetQuery finalQuery := selectQuery + commonQuery + limitOffSetQuery diff --git a/pkg/pipeline/PipelineBuilder.go b/pkg/pipeline/PipelineBuilder.go index da87b37fc7..0cd2fbbd5e 100644 --- a/pkg/pipeline/PipelineBuilder.go +++ b/pkg/pipeline/PipelineBuilder.go @@ -250,7 +250,7 @@ type ConfigMapSecretsResponse struct { } func parseMaterialInfo(materialInfo json.RawMessage, source string) (json.RawMessage, error) { - if source != "GOCD" && source != "CI-RUNNER" && source != "EXTERNAL" && source != "PRE_CD" && source != "POST_CD" && source != "POST_CI" && source != "PRE_CI" { + if source != "GOCD" && source != "CI-RUNNER" && source != "EXTERNAL" { return nil, fmt.Errorf("datasource: %s 
not supported", source) } var ciMaterials []repository.CiMaterialInfo diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index dad895f095..b093ebc3af 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -416,7 +416,7 @@ func (impl *WorkflowDagExecutorImpl) HandleCiSuccessEvent(artifact *repository.C //2. get config //3. trigger wf/ deployment var pipelineID int - if artifact.DataSource == repository.POST_CI || artifact.DataSource == repository.PRE_CI { + if artifact.DataSource == repository.POST_CI { pipelineID = artifact.ComponentId } else { // TODO: need to migrate artifact.PipelineId for dataSource="CI_RUNNER" also to component_id @@ -606,10 +606,24 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(cdStageCompleteE } func (impl *WorkflowDagExecutorImpl) SavePluginArtifacts(ciArtifact *repository.CiArtifact, pluginArtifactsDetail map[string][]string, pipelineId int, stage string) ([]*repository.CiArtifact, error) { + + saveArtifacts, err := impl.ciArtifactRepository.GetArtifactsByDataSourceAndComponentId(stage, pipelineId) + if err != nil { + return nil, err + } + PipelineArtifacts := make(map[string]bool) + for _, artifact := range saveArtifacts { + PipelineArtifacts[artifact.Image] = true + } + var CDArtifacts []*repository.CiArtifact for registry, artifacts := range pluginArtifactsDetail { // artifacts are list of images for _, artifact := range artifacts { + _, artifactAlreadySaved := PipelineArtifacts[artifact] + if artifactAlreadySaved { + continue + } pluginArtifact := &repository.CiArtifact{ Image: artifact, ImageDigest: ciArtifact.ImageDigest, @@ -626,13 +640,13 @@ func (impl *WorkflowDagExecutorImpl) SavePluginArtifacts(ciArtifact *repository. 
}, } CDArtifacts = append(CDArtifacts, pluginArtifact) - err := impl.ciArtifactRepository.SaveAll(CDArtifacts) - if err != nil { - impl.logger.Errorw("Error in saving artifacts metadata generated by plugin") - return CDArtifacts, err - } } } + err = impl.ciArtifactRepository.SaveAll(CDArtifacts) + if err != nil { + impl.logger.Errorw("Error in saving artifacts metadata generated by plugin") + return CDArtifacts, err + } return CDArtifacts, nil } @@ -656,8 +670,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * } } var err error - if cdWf == nil || (cdWf != nil && cdWf.CiArtifactId != artifact.Id) { - // (cdWf != nil && cdWf.CiArtifactId != artifact.Id) -> for auto trigger case when cd is triggered with image generated at plugin (like skopeo) + if cdWf == nil { cdWf = &pipelineConfig.CdWorkflow{ CiArtifactId: artifact.Id, PipelineId: pipeline.Id, @@ -1614,7 +1627,8 @@ func (impl *WorkflowDagExecutorImpl) TriggerDeployment(cdWf *pipelineConfig.CdWo //setting triggeredAt variable to have consistent data for various audit log places in db for deployment time triggeredAt := time.Now() - if cdWf == nil { + if cdWf == nil && (cdWf != nil && cdWf.CiArtifactId != artifact.Id) { + // cdWf != nil && cdWf.CiArtifactId != artifact.Id for auto trigger case when deployment is triggered with image generated by plugin cdWf = &pipelineConfig.CdWorkflow{ CiArtifactId: artifact.Id, PipelineId: pipeline.Id, @@ -1624,9 +1638,6 @@ func (impl *WorkflowDagExecutorImpl) TriggerDeployment(cdWf *pipelineConfig.CdWo if err != nil { return err } - } else { - // - cdWf.CiArtifactId = artifact.Id } runner := &pipelineConfig.CdWorkflowRunner{ diff --git a/scripts/sql/185_skopeo_plugin.up.sql b/scripts/sql/185_skopeo_plugin.up.sql index 9ec2f303aa..58b26b49e8 100644 --- a/scripts/sql/185_skopeo_plugin.up.sql +++ b/scripts/sql/185_skopeo_plugin.up.sql @@ -28,3 +28,6 @@ VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metada INSERT INTO 
"plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'REGISTRY_CREDENTIALS','STRING','',false,true,'INPUT','GLOBAL',1 ,'REGISTRY_CREDENTIALS','f','now()', 1, 'now()', 1); + +SELECT DISTINCT(ci_artifact.id) from ci_artifact LEFT JOIN cd_workflow ON ci_artifact.id = cd_workflow.ci_artifact_id LEFT JOIN cd_workflow_runner ON cd_workflow_runner.cd_workflow_id=cd_workflow.id Where (((cd_workflow_runner.id in (select MAX(cd_workflow_runner.id) OVER (PARTITION BY cd_workflow.ci_artifact_id) FROM cd_workflow_runner inner join cd_workflow on cd_workflow.id=cd_workflow_runner.cd_workflow_id)) AND ((cd_workflow.pipeline_id= 14 and cd_workflow_runner.workflow_type = 'DEPLOY' ) OR (cd_workflow.pipeline_id = 14 AND cd_workflow_runner.workflow_type = 'PRE' AND cd_workflow_runner.status IN ('Healthy','Succeeded') ))) OR (ci_artifact.component_id = 14 and ci_artifact.data_source= 'pre_cd' )) AND (ci_artifact.image LIKE '%%' ) AND ( ci_artifact.id NOT IN (74)) LIMIT 10 OFFSET 0 + From 2e472edd1c94e529ac07b986cbc76e1231c240a2 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 6 Nov 2023 15:24:47 +0530 Subject: [PATCH 113/143] fix image order --- internal/sql/repository/CiArtifactsListingQueryBuilder.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go index dcab6ec953..df160121ba 100644 --- a/internal/sql/repository/CiArtifactsListingQueryBuilder.go +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -67,7 +67,7 @@ func 
BuildQueryForArtifactsForCdStage(listingFilterOptions bean.ArtifactsListFil totalCountQuery := "SELECT COUNT(DISTINCT ci_artifact.id) as total_count " + commonQuery selectQuery := fmt.Sprintf("SELECT DISTINCT(ci_artifact.id) , (%v) ", totalCountQuery) //GroupByQuery := " GROUP BY cia.id " - limitOffSetQuery := fmt.Sprintf(" order by ci_artifact.id LIMIT %v OFFSET %v", listingFilterOptions.Limit, listingFilterOptions.Offset) + limitOffSetQuery := fmt.Sprintf(" order by ci_artifact.id desc LIMIT %v OFFSET %v", listingFilterOptions.Limit, listingFilterOptions.Offset) //finalQuery := selectQuery + commonQuery + GroupByQuery + limitOffSetQuery finalQuery := selectQuery + commonQuery + limitOffSetQuery From db0d21686b1a01b21e9826f47943261bfd080466 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 6 Nov 2023 15:46:22 +0530 Subject: [PATCH 114/143] fix cdWorkflow object --- pkg/pipeline/WorkflowDagExecutor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index b093ebc3af..ecc2ae2426 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -1627,7 +1627,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerDeployment(cdWf *pipelineConfig.CdWo //setting triggeredAt variable to have consistent data for various audit log places in db for deployment time triggeredAt := time.Now() - if cdWf == nil && (cdWf != nil && cdWf.CiArtifactId != artifact.Id) { + if cdWf == nil || (cdWf != nil && cdWf.CiArtifactId != artifact.Id) { // cdWf != nil && cdWf.CiArtifactId != artifact.Id for auto trigger case when deployment is triggered with image generated by plugin cdWf = &pipelineConfig.CdWorkflow{ CiArtifactId: artifact.Id, From f2711554f80631dc6415cfe773d216f51ba6ccbf Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 6 Nov 2023 16:21:50 +0530 Subject: [PATCH 115/143] removing unnecessary code --- .../pipelineConfig/CdWorfkflowRepository.go | 57 
------------------- 1 file changed, 57 deletions(-) diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 6e27b1e031..dd10224ebb 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -395,63 +395,6 @@ func (impl *CdWorkflowRepositoryImpl) FindCdWorkflowMetaByPipelineId(pipelineId } return wfrList, err } -func (impl *CdWorkflowRepositoryImpl) FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]repository.CiArtifact, int, error) { - - var ciArtifacts []repository.CiArtifact - query := "SELECT ci_artifact.* FROM ci_artifact" + - " LEFT JOIN cd_workflow ON ci_artifact.id = cd_workflow.ci_artifact_id" + - " LEFT JOIN cd_workflow_runner ON cd_workflow_runner.cd_workflow_id=cd_workflow.id " + - " Where (((cd_workflow_runner.id in (select MAX(cd_workflow_runner.id) OVER (PARTITION BY cd_workflow.ci_artifact_id) FROM cd_workflow_runner inner join cd_workflow on cd_workflow.id=cd_workflow_runner.cd_workflow_id))" + - " AND ((cd_workflow.pipeline_id= ? and cd_workflow_runner.workflow_type = ? ) OR (cd_workflow.pipeline_id = ? AND cd_workflow_runner.workflow_type = ? AND cd_workflow_runner.status IN ( ? ) )))" + - " OR (ci_artifact.component_id = ? and ci_artifact.data_source= ? ))" + - " AND (ci_artifact.image LIKE ? )" - if len(listingFilterOptions.ExcludeArtifactIds) > 0 { - query = query + " AND (cd_workflow.ci_artifact_id NOT IN (?) 
)" - query = query + fmt.Sprintf(" LIMIT %d", listingFilterOptions.Limit) - query = query + fmt.Sprintf(" OFFSET %d", listingFilterOptions.Offset) - _, err := impl.dbConnection.Query(&ciArtifacts, query, - listingFilterOptions.PipelineId, - listingFilterOptions.StageType, - listingFilterOptions.ParentId, - listingFilterOptions.ParentStageType, - pg.In([]string{application.Healthy, application.SUCCEEDED}), - listingFilterOptions.ParentId, - listingFilterOptions.PluginStage, - listingFilterOptions.SearchString, - pg.In(listingFilterOptions.ExcludeArtifactIds), - ) - if err != nil { - return ciArtifacts, 0, err - } - } else { - query = query + fmt.Sprintf(" LIMIT %d", listingFilterOptions.Limit) - query = query + fmt.Sprintf(" OFFSET %d", listingFilterOptions.Offset) - _, err := impl.dbConnection.Query(&ciArtifacts, query, - listingFilterOptions.PipelineId, - listingFilterOptions.StageType, - listingFilterOptions.ParentId, - listingFilterOptions.ParentStageType, - pg.In([]string{application.Healthy, application.SUCCEEDED}), - listingFilterOptions.ParentId, - listingFilterOptions.PluginStage, - listingFilterOptions.SearchString, - ) - if err != nil { - return ciArtifacts, 0, err - } - } - // - //totalCount, err := query.Count() - //if err == pg.ErrNoRows { - // return ciArtifacts, totalCount, err - //} - - //err = query.Select() - //if err == pg.ErrNoRows { - // return ciArtifacts, totalCount, nil - //} - return ciArtifacts, 0, nil -} func (impl *CdWorkflowRepositoryImpl) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) { var wfrList []CdWorkflowRunner From 4a03123372ddd88c2d1613af64922ff5f220c138 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 6 Nov 2023 17:02:43 +0530 Subject: [PATCH 116/143] fix post trigger --- internal/sql/repository/CiArtifactRepository.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/sql/repository/CiArtifactRepository.go 
b/internal/sql/repository/CiArtifactRepository.go index 4865b83aab..f69f391ac0 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -573,7 +573,7 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV2(cdPipelineId int } func GetCiMaterialInfo(materialInfo string, source string) ([]CiMaterialInfo, error) { - if source != "GOCD" && source != "CI-RUNNER" && source != "EXTERNAL" && source != "post_ci" { + if source != "GOCD" && source != "CI-RUNNER" && source != "EXTERNAL" && source != "post_ci" && source != "pre_cd" && source != "post_cd" { return nil, fmt.Errorf("datasource: %s not supported", source) } var ciMaterials []CiMaterialInfo From 90ae68ed86c853d96b2b92c56bfff7ac1be46ff4 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Tue, 7 Nov 2023 14:14:10 +0530 Subject: [PATCH 117/143] showing source info --- pkg/pipeline/PipelineBuilder.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/pipeline/PipelineBuilder.go b/pkg/pipeline/PipelineBuilder.go index 0cd2fbbd5e..38de5b8516 100644 --- a/pkg/pipeline/PipelineBuilder.go +++ b/pkg/pipeline/PipelineBuilder.go @@ -250,7 +250,7 @@ type ConfigMapSecretsResponse struct { } func parseMaterialInfo(materialInfo json.RawMessage, source string) (json.RawMessage, error) { - if source != "GOCD" && source != "CI-RUNNER" && source != "EXTERNAL" { + if source != "GOCD" && source != "CI-RUNNER" && source != "EXTERNAL" && source != "pre_cd" && source != "post_cd" && source != "post_ci" { return nil, fmt.Errorf("datasource: %s not supported", source) } var ciMaterials []repository.CiMaterialInfo From 2c6e251e3d4c4fddd18932a115c806f48b3297cd Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 8 Nov 2023 14:11:45 +0530 Subject: [PATCH 118/143] sql script no fix --- scripts/sql/182_custom_image_tag.down.sql | 15 --------- scripts/sql/182_custom_image_tag.up.sql | 32 ------------------- scripts/sql/183_custom_tag.up.sql | 1 - 
scripts/sql/185_skopeo_plugin.up.sql | 6 +--- scripts/sql/186_ci_artifact_refactor.down.sql | 0 ..._refactor.up.sql => 186_custom_tag.up.sql} | 1 + 6 files changed, 2 insertions(+), 53 deletions(-) delete mode 100644 scripts/sql/182_custom_image_tag.down.sql delete mode 100644 scripts/sql/182_custom_image_tag.up.sql delete mode 100644 scripts/sql/183_custom_tag.up.sql delete mode 100644 scripts/sql/186_ci_artifact_refactor.down.sql rename scripts/sql/{186_ci_artifact_refactor.up.sql => 186_custom_tag.up.sql} (89%) diff --git a/scripts/sql/182_custom_image_tag.down.sql b/scripts/sql/182_custom_image_tag.down.sql deleted file mode 100644 index 3033e71504..0000000000 --- a/scripts/sql/182_custom_image_tag.down.sql +++ /dev/null @@ -1,15 +0,0 @@ -DROP TABLE IF EXISTS custom_tag; - -DROP INDEX IF EXISTS entity_key_value; - -ALTER TABLE custom_tag - DROP CONSTRAINT unique_entity_key_entity_value; - -DROP TABLE IF EXISTS image_path_reservation; - -DROP INDEX IF EXISTS image_path_index; - -ALTER TABLE ci_workflow - DROP column IF EXISTS image_path_reservation_id; -ALTER TABLE ci_workflow - DROP CONSTRAINT fk_image_path_reservation_id; \ No newline at end of file diff --git a/scripts/sql/182_custom_image_tag.up.sql b/scripts/sql/182_custom_image_tag.up.sql deleted file mode 100644 index d6388c6ce3..0000000000 --- a/scripts/sql/182_custom_image_tag.up.sql +++ /dev/null @@ -1,32 +0,0 @@ -CREATE TABLE "public"."custom_tag" -( - id serial PRIMARY KEY, - custom_tag_format text, - tag_pattern text, - auto_increasing_number int DEFAULT 0, - entity_key int, - entity_value text, - active boolean DEFAULT true, - metadata jsonb -); - -CREATE INDEX IF NOT EXISTS entity_key_value ON custom_tag (entity_key, entity_value); - -ALTER TABLE custom_tag - ADD CONSTRAINT unique_entity_key_entity_value UNIQUE (entity_key, entity_value); - -CREATE TABLE IF not exists "public"."image_path_reservation" -( - id serial PRIMARY KEY, - custom_tag_id int, - image_path text, - active boolean default 
true, - FOREIGN KEY (custom_tag_id) REFERENCES custom_tag (id) -); - -CREATE INDEX IF NOT EXISTS image_path_index ON image_path_reservation (image_path); - -ALTER TABLE ci_workflow - ADD column IF NOT EXISTS image_path_reservation_id int; -ALTER TABLE ci_workflow - ADD CONSTRAINT fk_image_path_reservation_id FOREIGN KEY (image_path_reservation_id) REFERENCES image_path_reservation (id); \ No newline at end of file diff --git a/scripts/sql/183_custom_tag.up.sql b/scripts/sql/183_custom_tag.up.sql deleted file mode 100644 index f9b5a646e9..0000000000 --- a/scripts/sql/183_custom_tag.up.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE custom_tag ADD COLUMN enabled boolean default false; diff --git a/scripts/sql/185_skopeo_plugin.up.sql b/scripts/sql/185_skopeo_plugin.up.sql index 58b26b49e8..49ac154884 100644 --- a/scripts/sql/185_skopeo_plugin.up.sql +++ b/scripts/sql/185_skopeo_plugin.up.sql @@ -11,7 +11,7 @@ INSERT INTO "plugin_stage_mapping" ("plugin_id","stage_type","created_on", "crea VALUES ((SELECT id FROM plugin_metadata WHERE name='Skopeo'),0,'now()', 1, 'now()', 1); INSERT INTO "plugin_pipeline_script" ("id","type","mount_directory_from_host","container_image_path","deleted","created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_pipeline_script'),'CONTAINER_IMAGE','t','quay.io/devtron/test:9ab24450-81-909','f','now()',1,'now()',1); +VALUES (nextval('id_seq_plugin_pipeline_script'),'CONTAINER_IMAGE','t','quay.io/devtron/test:8d5a6d8d-81-1031','f','now()',1,'now()',1); INSERT INTO "plugin_step" ("id", "plugin_id","name","description","index","step_type","script_id","deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM plugin_metadata WHERE name='Skopeo'),'Step 1','Step 1 - Copy container images','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1); @@ -27,7 +27,3 @@ VALUES (nextval('id_seq_plugin_step_variable'), (SELECT 
ps.id FROM plugin_metada INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'REGISTRY_CREDENTIALS','STRING','',false,true,'INPUT','GLOBAL',1 ,'REGISTRY_CREDENTIALS','f','now()', 1, 'now()', 1); - - -SELECT DISTINCT(ci_artifact.id) from ci_artifact LEFT JOIN cd_workflow ON ci_artifact.id = cd_workflow.ci_artifact_id LEFT JOIN cd_workflow_runner ON cd_workflow_runner.cd_workflow_id=cd_workflow.id Where (((cd_workflow_runner.id in (select MAX(cd_workflow_runner.id) OVER (PARTITION BY cd_workflow.ci_artifact_id) FROM cd_workflow_runner inner join cd_workflow on cd_workflow.id=cd_workflow_runner.cd_workflow_id)) AND ((cd_workflow.pipeline_id= 14 and cd_workflow_runner.workflow_type = 'DEPLOY' ) OR (cd_workflow.pipeline_id = 14 AND cd_workflow_runner.workflow_type = 'PRE' AND cd_workflow_runner.status IN ('Healthy','Succeeded') ))) OR (ci_artifact.component_id = 14 and ci_artifact.data_source= 'pre_cd' )) AND (ci_artifact.image LIKE '%%' ) AND ( ci_artifact.id NOT IN (74)) LIMIT 10 OFFSET 0 - diff --git a/scripts/sql/186_ci_artifact_refactor.down.sql b/scripts/sql/186_ci_artifact_refactor.down.sql deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/scripts/sql/186_ci_artifact_refactor.up.sql b/scripts/sql/186_custom_tag.up.sql similarity index 89% rename from scripts/sql/186_ci_artifact_refactor.up.sql rename to scripts/sql/186_custom_tag.up.sql index 3153a038a4..0f785fc91a 100644 --- a/scripts/sql/186_ci_artifact_refactor.up.sql +++ b/scripts/sql/186_custom_tag.up.sql @@ -1,3 +1,4 @@ +ALTER TABLE custom_tag ADD COLUMN enabled boolean default false; 
ALTER TABLE ci_artifact ADD COLUMN credentials_source_type VARCHAR(50); ALTER TABLE ci_artifact ADD COLUMN credentials_source_value VARCHAR(50); ALTER TABLE ci_artifact ADD COLUMN component_id integer; From 542bd775a5fa4f4560b91708b80ee9abd9f74e41 Mon Sep 17 00:00:00 2001 From: ShashwatDadhich Date: Fri, 10 Nov 2023 11:49:59 +0530 Subject: [PATCH 119/143] enterprise sync --- api/bean/ValuesOverrideRequest.go | 3 +++ internal/sql/repository/CiArtifactsListingQueryBuilder.go | 2 +- pkg/pipeline/AppArtifactManager.go | 4 +++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/api/bean/ValuesOverrideRequest.go b/api/bean/ValuesOverrideRequest.go index f5c2146f77..100cc49b28 100644 --- a/api/bean/ValuesOverrideRequest.go +++ b/api/bean/ValuesOverrideRequest.go @@ -113,4 +113,7 @@ type ArtifactsListFilterOptions struct { //excludeArtifactIds ExcludeArtifactIds []int + + //excludeWfRunners + ExcludeWfrIds []int } diff --git a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go index ba0604232b..0cf2a6a166 100644 --- a/internal/sql/repository/CiArtifactsListingQueryBuilder.go +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -73,7 +73,7 @@ func BuildQueryForArtifactsForRollback(listingFilterOptions bean.ArtifactsListFi " WHERE cdw.pipeline_id=%v AND cdwr.workflow_type = '%v' AND cia.image LIKE '%v'" commonQuery = fmt.Sprintf(commonQuery, listingFilterOptions.PipelineId, listingFilterOptions.StageType, listingFilterOptions.SearchString) if len(listingFilterOptions.ExcludeArtifactIds) > 0 { - commonQuery = fmt.Sprintf(" %s AND cd_workflow__ci_artifact.id NOT IN (%s)", commonQuery, pg.In(listingFilterOptions.ExcludeArtifactIds)) + commonQuery = fmt.Sprintf(" %s AND cdwr.id NOT IN (%s)", commonQuery, pg.In(listingFilterOptions.ExcludeWfrIds)) } totalCountQuery := " SELECT COUNT(cia.id) as total_count " + commonQuery orderByQuery := " ORDER BY cdwr.id DESC " diff --git 
a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 50cf79a56d..836119ad83 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -323,7 +323,7 @@ func (impl *AppArtifactManagerImpl) BuildRollbackArtifactsList(artifactListingFi } if len(latestWf) > 0 { //we should never show current deployed artifact in rollback API - artifactListingFilterOpts.ExcludeArtifactIds = []int{latestWf[0].CdWorkflow.CiArtifactId} + artifactListingFilterOpts.ExcludeWfrIds = []int{latestWf[0].Id} } ciArtifacts, totalCount, err := impl.ciArtifactRepository.FetchArtifactsByCdPipelineIdV2(artifactListingFilterOpts) @@ -363,6 +363,8 @@ func (impl *AppArtifactManagerImpl) BuildRollbackArtifactsList(artifactListingFi DeployedTime: formatDate(ciArtifact.StartedOn, bean2.LayoutRFC3339), WfrId: ciArtifact.CdWorkflowRunnerId, DeployedBy: userEmail, + Scanned: ciArtifact.Scanned, + ScanEnabled: ciArtifact.ScanEnabled, }) artifactIds = append(artifactIds, ciArtifact.Id) } From abc6fad5807476801fa12016078abe9685a13b9d Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 10 Nov 2023 15:30:57 +0530 Subject: [PATCH 120/143] qa fixes --- api/restHandler/app/BuildPipelineRestHandler.go | 2 +- .../sql/repository/CiArtifactsListingQueryBuilder.go | 3 +-- pkg/pipeline/CiService.go | 6 ++++++ pkg/pipeline/CustomTagService.go | 3 +-- pkg/pipeline/WorkflowDagExecutor.go | 11 ++++++++++- 5 files changed, 19 insertions(+), 6 deletions(-) diff --git a/api/restHandler/app/BuildPipelineRestHandler.go b/api/restHandler/app/BuildPipelineRestHandler.go index 225fef0dcc..a04ba2e870 100644 --- a/api/restHandler/app/BuildPipelineRestHandler.go +++ b/api/restHandler/app/BuildPipelineRestHandler.go @@ -596,7 +596,7 @@ func (handler PipelineConfigRestHandlerImpl) TriggerCiPipeline(w http.ResponseWr resp, err := handler.ciHandler.HandleCIManual(ciTriggerRequest) if errors.Is(err, bean1.ErrImagePathInUse) { handler.Logger.Errorw("service err duplicate image 
tag, TriggerCiPipeline", "err", err, "payload", ciTriggerRequest) - common.WriteJsonResp(w, err, response, http.StatusConflict) + common.WriteJsonResp(w, err, err, http.StatusConflict) return } if err != nil { diff --git a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go index df160121ba..0685dca5c2 100644 --- a/internal/sql/repository/CiArtifactsListingQueryBuilder.go +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository/helper" - "github.com/go-pg/pg" ) func BuildQueryForParentTypeCIOrWebhook(listingFilterOpts bean.ArtifactsListFilterOptions) string { @@ -81,7 +80,7 @@ func BuildQueryForArtifactsForRollback(listingFilterOptions bean.ArtifactsListFi " WHERE cdw.pipeline_id=%v AND cdwr.workflow_type = '%v' AND cia.image LIKE '%v'" commonQuery = fmt.Sprintf(commonQuery, listingFilterOptions.PipelineId, listingFilterOptions.StageType, listingFilterOptions.SearchString) if len(listingFilterOptions.ExcludeArtifactIds) > 0 { - commonQuery = fmt.Sprintf(" %s AND cd_workflow__ci_artifact.id NOT IN (%s)", commonQuery, pg.In(listingFilterOptions.ExcludeArtifactIds)) + commonQuery = fmt.Sprintf(" %s AND cia.id NOT IN (%s)", commonQuery, helper.GetCommaSepratedString(listingFilterOptions.ExcludeArtifactIds)) } totalCountQuery := " SELECT COUNT(cia.id) as total_count " + commonQuery orderByQuery := " ORDER BY cdwr.id DESC " diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 949ff93a60..832092cede 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -493,6 +493,12 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
fmt.Sprintf(bean2.ImagePathPattern, pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository, dockerImageTag), pipeline.CiTemplate.DockerRegistry.Id) if err != nil { impl.Logger.Errorw("error in getting env variables for skopeo plugin") + savedWf.Status = pipelineConfig.WorkflowFailed + savedWf.Message = err.Error() + err1 := impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) + if err1 != nil { + impl.Logger.Errorw("could not save workflow, after failing due to conflicting image tag") + } return nil, err } diff --git a/pkg/pipeline/CustomTagService.go b/pkg/pipeline/CustomTagService.go index b4efa8a5c7..78d81b50c4 100644 --- a/pkg/pipeline/CustomTagService.go +++ b/pkg/pipeline/CustomTagService.go @@ -1,7 +1,6 @@ package pipeline import ( - "errors" "fmt" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository" @@ -213,7 +212,7 @@ func (impl *CustomTagServiceImpl) ReserveImagePath(imagePath string, customTagId return nil, err } if len(imagePathReservations) > 0 { - return nil, errors.New("cannot copy image using skopeo, image with similar name already exist") + return nil, bean2.ErrImagePathInUse } imagePathReservation := &repository.ImagePathReservation{ ImagePath: imagePath, diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 86dcd5ba52..aca148419c 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -773,11 +773,16 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * } registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, dockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) if err != nil { + runner.Status = pipelineConfig.WorkflowFailed + err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) impl.logger.Errorw("error in parsing 
skopeo input variable", "err", err) return err } imagePathReservationIds, err := impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) if err != nil { + runner.Status = pipelineConfig.WorkflowFailed + runner.Message = err.Error() + err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) return err } runner.ImagePathReservationIds = imagePathReservationIds @@ -918,7 +923,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor } var pluginImagePathReservationIds []int - for _, step := range cdStageWorkflowRequest.PostCiSteps { + for _, step := range cdStageWorkflowRequest.PrePostDeploySteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { // for Skopeo plugin parse destination images and save its data in image path reservation table customTag, dockerImageTag, err := impl.customTagService.GetCustomTag(bean3.EntityTypePostCD, strconv.Itoa(pipeline.Id)) @@ -942,11 +947,15 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor } registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, dockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) if err != nil { + runner.Status = pipelineConfig.WorkflowFailed + err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return err } imagePathReservationIds, err := impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) if err != nil { + runner.Status = pipelineConfig.WorkflowFailed + err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) return err } pluginImagePathReservationIds = imagePathReservationIds From 26ff795aaa69961f05c072a1d92e89e47c68eeed Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Fri, 10 Nov 2023 16:52:43 +0530 Subject: [PATCH 121/143] offset issue --- 
api/restHandler/app/DeploymentPipelineRestHandler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/restHandler/app/DeploymentPipelineRestHandler.go b/api/restHandler/app/DeploymentPipelineRestHandler.go index 0c8e2c7060..078dbf8ccf 100644 --- a/api/restHandler/app/DeploymentPipelineRestHandler.go +++ b/api/restHandler/app/DeploymentPipelineRestHandler.go @@ -1189,7 +1189,7 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsByCDPipeline(w http.Res offsetQueryParam := r.URL.Query().Get("offset") if offsetQueryParam != "" { offset, err = strconv.Atoi(offsetQueryParam) - if err != nil || offset < 1 { + if err != nil || offset < 0 { handler.Logger.Errorw("request err, GetArtifactsForRollback", "err", err, "offsetQueryParam", offsetQueryParam) common.WriteJsonResp(w, err, "invalid offset", http.StatusBadRequest) } From cc152f41ba88d11c16c66990422e2c02d8b3f556 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Fri, 10 Nov 2023 17:02:47 +0530 Subject: [PATCH 122/143] query change --- .../pipelineConfig/CdWorfkflowRepository.go | 12 ++++++++---- pkg/pipeline/AppArtifactManager.go | 10 +++++----- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index c62ddb9573..e115b34eb8 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -40,7 +40,7 @@ type CdWorkflowRepository interface { FindById(wfId int) (*CdWorkflow, error) FindCdWorkflowMetaByEnvironmentId(appId int, environmentId int, offset int, size int) ([]CdWorkflowRunner, error) FindCdWorkflowMetaByPipelineId(pipelineId int, offset int, size int) ([]CdWorkflowRunner, error) - FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) + FindArtifactByPipelineIdAndRunnerType(pipelineId int, 
runnerType bean.WorkflowType, limit int, runnerStatuses []string) ([]CdWorkflowRunner, error) SaveWorkFlowRunner(wfr *CdWorkflowRunner) (*CdWorkflowRunner, error) UpdateWorkFlowRunner(wfr *CdWorkflowRunner) error UpdateWorkFlowRunnersWithTxn(wfrs []*CdWorkflowRunner, tx *pg.Tx) error @@ -395,13 +395,17 @@ func (impl *CdWorkflowRepositoryImpl) FindCdWorkflowMetaByPipelineId(pipelineId return wfrList, err } -func (impl *CdWorkflowRepositoryImpl) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int) ([]CdWorkflowRunner, error) { +func (impl *CdWorkflowRepositoryImpl) FindArtifactByPipelineIdAndRunnerType(pipelineId int, runnerType bean.WorkflowType, limit int, runnerStatuses []string) ([]CdWorkflowRunner, error) { var wfrList []CdWorkflowRunner - err := impl.dbConnection. + query := impl.dbConnection. Model(&wfrList). Column("cd_workflow_runner.*", "CdWorkflow", "CdWorkflow.Pipeline", "CdWorkflow.CiArtifact"). Where("cd_workflow.pipeline_id = ?", pipelineId). - Where("cd_workflow_runner.workflow_type = ?", runnerType). + Where("cd_workflow_runner.workflow_type = ?", runnerType) + if len(runnerStatuses) > 0 { + query.Where("cd_workflow_runner.status IN (?)", pg.In(runnerStatuses)) + } + err := query. Order("cd_workflow_runner.id DESC"). Limit(limit). 
Select() diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 836119ad83..0049d5ce43 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -98,7 +98,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStage(pipelineId int, sta //getting running artifact id for parent cd parentCdRunningArtifactId := 0 if parentCdId > 0 && parent { - parentCdWfrList, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(parentCdId, bean.CD_WORKFLOW_TYPE_DEPLOY, 1) + parentCdWfrList, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(parentCdId, bean.CD_WORKFLOW_TYPE_DEPLOY, 1, nil) if err != nil || len(parentCdWfrList) == 0 { impl.logger.Errorw("error in getting artifact for parent cd", "parentCdPipelineId", parentCdId) return ciArtifacts, artifactMap, 0, "", err @@ -106,7 +106,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStage(pipelineId int, sta parentCdRunningArtifactId = parentCdWfrList[0].CdWorkflow.CiArtifact.Id } //getting wfr for parent and updating artifacts - parentWfrList, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(pipelineId, stageType, limit) + parentWfrList, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(pipelineId, stageType, limit, nil) if err != nil { impl.logger.Errorw("error in getting artifact for deployed items", "cdPipelineId", pipelineId) return ciArtifacts, artifactMap, 0, "", err @@ -316,7 +316,7 @@ func (impl *AppArtifactManagerImpl) BuildRollbackArtifactsList(artifactListingFi totalCount := 0 //1)get current deployed artifact on this pipeline - latestWf, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(artifactListingFilterOpts.PipelineId, artifactListingFilterOpts.StageType, 1) + latestWf, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(artifactListingFilterOpts.PipelineId, artifactListingFilterOpts.StageType, 1, 
[]string{application.Healthy, application.SUCCEEDED, application.Progressing}) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting latest workflow by pipelineId", "pipelineId", artifactListingFilterOpts.PipelineId, "currentStageType", artifactListingFilterOpts.StageType) return deployedCiArtifacts, nil, totalCount, err @@ -674,7 +674,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A var ciArtifacts []*bean2.CiArtifactBean totalCount := 0 //1)get current deployed artifact on this pipeline - latestWf, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(listingFilterOpts.PipelineId, listingFilterOpts.StageType, 1) + latestWf, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(listingFilterOpts.PipelineId, listingFilterOpts.StageType, 1, []string{application.Healthy, application.SUCCEEDED, application.Progressing}) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting latest workflow by pipelineId", "pipelineId", listingFilterOpts.PipelineId, "currentStageType", listingFilterOpts.StageType) return ciArtifacts, 0, "", totalCount, err @@ -754,7 +754,7 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts artifactRunningOnParentCd := 0 if listingFilterOpts.ParentCdId > 0 { //TODO: check if we can fetch LastSuccessfulTriggerOnParent wfr along with last running wf - parentCdWfrList, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(listingFilterOpts.ParentCdId, bean.CD_WORKFLOW_TYPE_DEPLOY, 1) + parentCdWfrList, err := impl.cdWorkflowRepository.FindArtifactByPipelineIdAndRunnerType(listingFilterOpts.ParentCdId, bean.CD_WORKFLOW_TYPE_DEPLOY, 1, []string{application.Healthy, application.SUCCEEDED, application.Progressing}) if err != nil || len(parentCdWfrList) == 0 { impl.logger.Errorw("error in getting artifact for parent cd", "parentCdPipelineId", listingFilterOpts.ParentCdId) return ciArtifacts, 
totalCount, err From 47be57792f0cbaf6914a1ca030776134ab6c701f Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Fri, 10 Nov 2023 17:09:04 +0530 Subject: [PATCH 123/143] query change --- internal/sql/repository/CiArtifactsListingQueryBuilder.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go index 0cf2a6a166..5722508f70 100644 --- a/internal/sql/repository/CiArtifactsListingQueryBuilder.go +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository/helper" - "github.com/go-pg/pg" ) func BuildQueryForParentTypeCIOrWebhook(listingFilterOpts bean.ArtifactsListFilterOptions) string { @@ -73,7 +72,7 @@ func BuildQueryForArtifactsForRollback(listingFilterOptions bean.ArtifactsListFi " WHERE cdw.pipeline_id=%v AND cdwr.workflow_type = '%v' AND cia.image LIKE '%v'" commonQuery = fmt.Sprintf(commonQuery, listingFilterOptions.PipelineId, listingFilterOptions.StageType, listingFilterOptions.SearchString) if len(listingFilterOptions.ExcludeArtifactIds) > 0 { - commonQuery = fmt.Sprintf(" %s AND cdwr.id NOT IN (%s)", commonQuery, pg.In(listingFilterOptions.ExcludeWfrIds)) + commonQuery = fmt.Sprintf(" %s AND cdwr.id NOT IN (%s)", commonQuery, helper.GetCommaSepratedString(listingFilterOptions.ExcludeWfrIds)) } totalCountQuery := " SELECT COUNT(cia.id) as total_count " + commonQuery orderByQuery := " ORDER BY cdwr.id DESC " From 92aac1d156baa2d1dd65a1f9c8da5b368280835d Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Fri, 10 Nov 2023 17:30:40 +0530 Subject: [PATCH 124/143] fix --- api/restHandler/app/DeploymentPipelineRestHandler.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/restHandler/app/DeploymentPipelineRestHandler.go 
b/api/restHandler/app/DeploymentPipelineRestHandler.go index 078dbf8ccf..df262ba4b1 100644 --- a/api/restHandler/app/DeploymentPipelineRestHandler.go +++ b/api/restHandler/app/DeploymentPipelineRestHandler.go @@ -1192,8 +1192,8 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsByCDPipeline(w http.Res if err != nil || offset < 0 { handler.Logger.Errorw("request err, GetArtifactsForRollback", "err", err, "offsetQueryParam", offsetQueryParam) common.WriteJsonResp(w, err, "invalid offset", http.StatusBadRequest) + return } - return } sizeQueryParam := r.URL.Query().Get("size") @@ -1202,8 +1202,8 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsByCDPipeline(w http.Res if err != nil { handler.Logger.Errorw("request err, GetArtifactsForRollback", "err", err, "sizeQueryParam", sizeQueryParam) common.WriteJsonResp(w, err, "invalid size", http.StatusBadRequest) + return } - return } handler.Logger.Infow("request payload, GetArtifactsByCDPipeline", "cdPipelineId", cdPipelineId, "stage", stage) From 13c60fb4b3957cad97855264b928fb5eedea3082 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu Date: Fri, 10 Nov 2023 18:00:22 +0530 Subject: [PATCH 125/143] fix --- internal/sql/repository/CiArtifactsListingQueryBuilder.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go index 5722508f70..c01d6995f4 100644 --- a/internal/sql/repository/CiArtifactsListingQueryBuilder.go +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -71,7 +71,7 @@ func BuildQueryForArtifactsForRollback(listingFilterOptions bean.ArtifactsListFi " INNER JOIN ci_artifact cia ON cia.id=cdw.ci_artifact_id " + " WHERE cdw.pipeline_id=%v AND cdwr.workflow_type = '%v' AND cia.image LIKE '%v'" commonQuery = fmt.Sprintf(commonQuery, listingFilterOptions.PipelineId, listingFilterOptions.StageType, listingFilterOptions.SearchString) - if 
len(listingFilterOptions.ExcludeArtifactIds) > 0 { + if len(listingFilterOptions.ExcludeWfrIds) > 0 { commonQuery = fmt.Sprintf(" %s AND cdwr.id NOT IN (%s)", commonQuery, helper.GetCommaSepratedString(listingFilterOptions.ExcludeWfrIds)) } totalCountQuery := " SELECT COUNT(cia.id) as total_count " + commonQuery From 24fa7579bcc572ad59d0f91d9ba1061bf78a9974 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Tue, 14 Nov 2023 13:17:42 +0530 Subject: [PATCH 126/143] down script file --- scripts/sql/190_custom_tag.down.sql | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 scripts/sql/190_custom_tag.down.sql diff --git a/scripts/sql/190_custom_tag.down.sql b/scripts/sql/190_custom_tag.down.sql new file mode 100644 index 0000000000..e69de29bb2 From ba4bc33df4f08402a77cbc5fcc4fdc6873745832 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Tue, 14 Nov 2023 14:33:51 +0530 Subject: [PATCH 127/143] fix migration script --- scripts/sql/189_skopeo_plugin.up.sql | 3 --- 1 file changed, 3 deletions(-) diff --git a/scripts/sql/189_skopeo_plugin.up.sql b/scripts/sql/189_skopeo_plugin.up.sql index 49ac154884..4d9e9ff8f3 100644 --- a/scripts/sql/189_skopeo_plugin.up.sql +++ b/scripts/sql/189_skopeo_plugin.up.sql @@ -1,9 +1,6 @@ INSERT INTO "plugin_metadata" ("id", "name", "description","type","icon","deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_metadata'), 'Skopeo','','PRESET','','f', 'now()', 1, 'now()', 1); -INSERT INTO "plugin_tag" ("id", "name", "deleted", "created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_tag'), 'CI task','f', 'now()',1, 'now()', 1); - INSERT INTO "plugin_tag_relation" ("id", "tag_id", "plugin_id", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_tag_relation'),(SELECT id FROM plugin_tag WHERE name='CI task') , (SELECT id FROM plugin_metadata WHERE name='Skopeo'),'now()', 1, 'now()', 1); From 
4acfa07f222c694e0bc2d961bf113340d28774e6 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Tue, 14 Nov 2023 16:58:33 +0530 Subject: [PATCH 128/143] plugin migration --- scripts/sql/189_skopeo_plugin.up.sql | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/sql/189_skopeo_plugin.up.sql b/scripts/sql/189_skopeo_plugin.up.sql index 4d9e9ff8f3..f72021d872 100644 --- a/scripts/sql/189_skopeo_plugin.up.sql +++ b/scripts/sql/189_skopeo_plugin.up.sql @@ -14,7 +14,11 @@ INSERT INTO "plugin_step" ("id", "plugin_id","name","description","index","step_ VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM plugin_metadata WHERE name='Skopeo'),'Step 1','Step 1 - Copy container images','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1); INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index", "deleted", "created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'DESTINATION_INFO','STRING','Skopeo account username',true,true,'INPUT','NEW',1 ,'f','now()', 1, 'now()', 1); +VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'DESTINATION_INFO','STRING', + 'In case of CI, build image will be copied to registry and repository provided in DESTINATION_INFO. 
In case of PRE-CD/POST-CD, Image used to trigger stage will be copied in DESTINATION_INFO + Format: + | ,', + ,true,false,'INPUT','NEW',1 ,'f','now()', 1, 'now()', 1); INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'DOCKER_IMAGE','STRING','',false,true,'INPUT','GLOBAL',1 ,'DOCKER_IMAGE','f','now()', 1, 'now()', 1); From 6f4edfb535a4bf2760e68aecb0ed7937eb1fe82d Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 15 Nov 2023 12:43:35 +0530 Subject: [PATCH 129/143] bug fixes --- .../pipelineConfig/CdWorfkflowRepository.go | 2 +- pkg/pipeline/WorkflowDagExecutor.go | 154 ++++++++---------- 2 files changed, 67 insertions(+), 89 deletions(-) diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index bf67977106..bb9de29cda 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -168,7 +168,7 @@ type CdWorkflowRunner struct { PodName string `sql:"pod_name"` BlobStorageEnabled bool `sql:"blob_storage_enabled,notnull"` RefCdWorkflowRunnerId int `sql:"ref_cd_workflow_runner_id,notnull"` - ImagePathReservationIds []int `sql:"image_path_reservation_ids" pg:",array"` + ImagePathReservationIds []int `sql:"image_path_reservation_ids,notnull" pg:",array"` CdWorkflow *CdWorkflow sql.AuditLog } diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index aca148419c..17f2cfbc9c 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ 
b/pkg/pipeline/WorkflowDagExecutor.go @@ -743,27 +743,68 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * return err } cdStageWorkflowRequest.StageType = types.PRE - // handling plugin specific logic + // handling skopeo plugin specific logic + imagePathReservationIds, err := impl.SetSkopeoPluginDataInWorkflowRequest(cdStageWorkflowRequest, pipeline.Id, types.PRE, artifact) + if err != nil { + runner.Status = pipelineConfig.WorkflowFailed + runner.Message = err.Error() + _ = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) + return err + } else { + runner.ImagePathReservationIds = imagePathReservationIds + _ = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) + } + + _, span = otel.Tracer("orchestrator").Start(ctx, "cdWorkflowService.SubmitWorkflow") + cdStageWorkflowRequest.Pipeline = pipeline + cdStageWorkflowRequest.Env = env + cdStageWorkflowRequest.Type = bean3.CD_WORKFLOW_PIPELINE_TYPE + _, err = impl.cdWorkflowService.SubmitWorkflow(cdStageWorkflowRequest) + span.End() + err = impl.sendPreStageNotification(ctx, cdWf, pipeline) + if err != nil { + return err + } + //creating cd config history entry + _, span = otel.Tracer("orchestrator").Start(ctx, "prePostCdScriptHistoryService.CreatePrePostCdScriptHistory") + err = impl.prePostCdScriptHistoryService.CreatePrePostCdScriptHistory(pipeline, nil, repository3.PRE_CD_TYPE, true, triggeredBy, triggeredAt) + span.End() + if err != nil { + impl.logger.Errorw("error in creating pre cd script entry", "err", err, "pipeline", pipeline) + return err + } + return nil +} + +func (impl *WorkflowDagExecutorImpl) SetSkopeoPluginDataInWorkflowRequest(cdStageWorkflowRequest *types.WorkflowRequest, pipelineId int, pipelineStage string, artifact *repository.CiArtifact) ([]int, error) { skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) + var imagePathReservationIds []int if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting 
skopeo plugin id", "err", err) - return err + return imagePathReservationIds, err } for _, step := range cdStageWorkflowRequest.PrePostDeploySteps { if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { + var pipelineStageEntityType int + if pipelineStage == types.PRE { + pipelineStageEntityType = bean3.EntityTypePreCD + } else { + pipelineStageEntityType = bean3.EntityTypePostCD + } // for Skopeo plugin parse destination images and save its data in image path reservation table - customTag, dockerImageTag, err := impl.customTagService.GetCustomTag(bean3.EntityTypePreCD, strconv.Itoa(pipeline.Id)) + customTagDbObject, customDockerImageTag, err := impl.customTagService.GetCustomTag(pipelineStageEntityType, strconv.Itoa(pipelineId)) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) - return err + return imagePathReservationIds, err } var customTagId int - if customTag != nil && customTag.Id > 0 { - customTagId = customTag.Id + if customTagDbObject != nil && customTagDbObject.Id > 0 { + customTagId = customTagDbObject.Id } else { customTagId = -1 } var sourceDockerRegistryId string + if artifact.DataSource == repository.PRE_CD || artifact.DataSource == repository.POST_CD || artifact.DataSource == repository.POST_CI { if artifact.CredentialsSourceType == repository.GLOBAL_CONTAINER_REGISTRY { sourceDockerRegistryId = artifact.CredentialSourceValue @@ -771,50 +812,28 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * } else { sourceDockerRegistryId = cdStageWorkflowRequest.DockerRegistryId } - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, dockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) + registryDestinationImageMap, registryCredentialMap, err := 
impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, customDockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) if err != nil { - runner.Status = pipelineConfig.WorkflowFailed - err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) impl.logger.Errorw("error in parsing skopeo input variable", "err", err) - return err - } - imagePathReservationIds, err := impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) - if err != nil { - runner.Status = pipelineConfig.WorkflowFailed - runner.Message = err.Error() - err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) - return err + return imagePathReservationIds, err } - runner.ImagePathReservationIds = imagePathReservationIds - err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) + imagePathReservationIds, err = impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) if err != nil { - impl.logger.Errorw("error in updating image path reservation ") + impl.logger.Errorw("error in reserving image", "err", err) + return imagePathReservationIds, err } cdStageWorkflowRequest.RegistryDestinationImageMap = registryDestinationImageMap cdStageWorkflowRequest.RegistryCredentialMap = registryCredentialMap - cdStageWorkflowRequest.PluginArtifactStage = repository.PRE_CD + var pluginArtifactStage string + if pipelineStage == types.PRE { + pluginArtifactStage = repository.PRE_CD + } else { + pluginArtifactStage = repository.POST_CD + } + cdStageWorkflowRequest.PluginArtifactStage = pluginArtifactStage } } - - _, span = otel.Tracer("orchestrator").Start(ctx, "cdWorkflowService.SubmitWorkflow") - cdStageWorkflowRequest.Pipeline = pipeline - cdStageWorkflowRequest.Env = env - cdStageWorkflowRequest.Type = bean3.CD_WORKFLOW_PIPELINE_TYPE - _, err = impl.cdWorkflowService.SubmitWorkflow(cdStageWorkflowRequest) - span.End() - err = impl.sendPreStageNotification(ctx, cdWf, pipeline) - if err != nil { - return err - } - 
//creating cd config history entry - _, span = otel.Tracer("orchestrator").Start(ctx, "prePostCdScriptHistoryService.CreatePrePostCdScriptHistory") - err = impl.prePostCdScriptHistoryService.CreatePrePostCdScriptHistory(pipeline, nil, repository3.PRE_CD_TYPE, true, triggeredBy, triggeredAt) - span.End() - if err != nil { - impl.logger.Errorw("error in creating pre cd script entry", "err", err, "pipeline", pipeline) - return err - } - return nil + return imagePathReservationIds, nil } func (impl *WorkflowDagExecutorImpl) sendPreStageNotification(ctx context.Context, cdWf *pipelineConfig.CdWorkflow, pipeline *pipelineConfig.Pipeline) error { @@ -916,54 +935,15 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor cdStageWorkflowRequest.Env = env cdStageWorkflowRequest.Type = bean3.CD_WORKFLOW_PIPELINE_TYPE // handling plugin specific logic - skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in getting skopeo plugin id", "err", err) + + pluginImagePathReservationIds, err := impl.SetSkopeoPluginDataInWorkflowRequest(cdStageWorkflowRequest, pipeline.Id, types.POST, cdWf.CiArtifact) + if err != nil { + runner.Status = pipelineConfig.WorkflowFailed + runner.Message = err.Error() + _ = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) return err } - var pluginImagePathReservationIds []int - for _, step := range cdStageWorkflowRequest.PrePostDeploySteps { - if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { - // for Skopeo plugin parse destination images and save its data in image path reservation table - customTag, dockerImageTag, err := impl.customTagService.GetCustomTag(bean3.EntityTypePostCD, strconv.Itoa(pipeline.Id)) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) - return err - } - var customTagId int - if customTag != 
nil && customTag.Id > 0 { - customTagId = customTag.Id - } else { - customTagId = -1 - } - var sourceDockerRegistryId string - if cdWf.CiArtifact.DataSource == repository.PRE_CD || cdWf.CiArtifact.DataSource == repository.POST_CD || cdWf.CiArtifact.DataSource == repository.POST_CI { - if cdWf.CiArtifact.CredentialsSourceType == repository.GLOBAL_CONTAINER_REGISTRY { - sourceDockerRegistryId = cdWf.CiArtifact.CredentialSourceValue - } - } else { - sourceDockerRegistryId = cdStageWorkflowRequest.DockerRegistryId - } - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, dockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) - if err != nil { - runner.Status = pipelineConfig.WorkflowFailed - err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) - impl.logger.Errorw("error in parsing skopeo input variable", "err", err) - return err - } - imagePathReservationIds, err := impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) - if err != nil { - runner.Status = pipelineConfig.WorkflowFailed - err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) - return err - } - pluginImagePathReservationIds = imagePathReservationIds - cdStageWorkflowRequest.RegistryDestinationImageMap = registryDestinationImageMap - cdStageWorkflowRequest.RegistryCredentialMap = registryCredentialMap - cdStageWorkflowRequest.PluginArtifactStage = repository.POST_CD - } - } _, err = impl.cdWorkflowService.SubmitWorkflow(cdStageWorkflowRequest) if err != nil { impl.logger.Errorw("error in submitting workflow", "err", err, "cdStageWorkflowRequest", cdStageWorkflowRequest, "pipeline", pipeline, "env", env) @@ -1396,7 +1376,6 @@ func (impl *WorkflowDagExecutorImpl) buildWFRequest(runner *pipelineConfig.CdWor cdStageWorkflowRequest.DockerRegistryURL = ciPipeline.CiTemplate.DockerRegistry.RegistryURL cdStageWorkflowRequest.DockerRegistryId = 
ciPipeline.CiTemplate.DockerRegistry.Id cdStageWorkflowRequest.CiPipelineType = ciPipeline.PipelineType - cdStageWorkflowRequest.DockerRegistryId = *ciPipeline.CiTemplate.DockerRegistryId } else if cdPipeline.AppId > 0 { ciTemplate, err := impl.CiTemplateRepository.FindByAppId(cdPipeline.AppId) if err != nil { @@ -1412,7 +1391,6 @@ func (impl *WorkflowDagExecutorImpl) buildWFRequest(runner *pipelineConfig.CdWor cdStageWorkflowRequest.SecretKey = ciTemplate.DockerRegistry.AWSSecretAccessKey cdStageWorkflowRequest.DockerRegistryType = string(ciTemplate.DockerRegistry.RegistryType) cdStageWorkflowRequest.DockerRegistryURL = ciTemplate.DockerRegistry.RegistryURL - cdStageWorkflowRequest.DockerRegistryId = *ciTemplate.DockerRegistryId appLabels, err := impl.appLabelRepository.FindAllByAppId(cdPipeline.AppId) cdStageWorkflowRequest.DockerRegistryId = ciPipeline.CiTemplate.DockerRegistry.Id if err != nil && err != pg.ErrNoRows { From 20d5514d0d12d4b82a50620b3b97d45b2fe4633b Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Wed, 15 Nov 2023 13:19:13 +0530 Subject: [PATCH 130/143] qa issue fix --- pkg/pipeline/DeploymentPipelineConfigService.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index 7a9b760be5..28c1a0c2c6 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -428,12 +428,12 @@ func (impl *CdPipelineConfigServiceImpl) CreateCdPipelines(pipelineCreateRequest } func (impl *CdPipelineConfigServiceImpl) CDPipelineCustomTagDBOperations(pipeline *bean.CDPipelineConfigObject) error { - if !pipeline.EnableCustomTag { - return nil - } - if pipeline.EnableCustomTag && len(pipeline.CustomTagObject.TagPattern) == 0 { + if pipeline.EnableCustomTag && (pipeline.CustomTagObject != nil && len(pipeline.CustomTagObject.TagPattern) == 0) { return fmt.Errorf("please provide custom tag data if 
tag is enabled") } + if pipeline.CustomTagObject != nil && pipeline.CustomTagObject.CounterX < 0 { + return fmt.Errorf("value of {x} cannot be negative") + } if pipeline.CustomTagObject == nil && pipeline.CustomTagStage == nil { // delete custom tag if removed from request err := impl.DeleteCustomTag(pipeline) From a736d119e679c6484028c810e04f190020e0f920 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Thu, 16 Nov 2023 16:23:43 +0530 Subject: [PATCH 131/143] bug fixes --- .../app/BuildPipelineRestHandler.go | 52 +++++++++++++----- .../app/PipelineConfigRestHandler.go | 5 +- .../sql/repository/CiArtifactRepository.go | 14 +++++ .../pipelineConfig/CdWorfkflowRepository.go | 4 +- pkg/pipeline/AppArtifactManager.go | 10 ++-- pkg/pipeline/CdHandler.go | 1 + pkg/pipeline/CiService.go | 8 +++ .../DeploymentPipelineConfigService.go | 12 ++-- pkg/pipeline/WebhookService.go | 55 ++++++++++--------- pkg/pipeline/WorkflowDagExecutor.go | 24 +++++++- pkg/pipeline/pipelineStageVariableParser.go | 4 ++ wire_gen.go | 2 +- 12 files changed, 135 insertions(+), 56 deletions(-) diff --git a/api/restHandler/app/BuildPipelineRestHandler.go b/api/restHandler/app/BuildPipelineRestHandler.go index a04ba2e870..78eda463e6 100644 --- a/api/restHandler/app/BuildPipelineRestHandler.go +++ b/api/restHandler/app/BuildPipelineRestHandler.go @@ -1976,27 +1976,49 @@ func (handler PipelineConfigRestHandlerImpl) extractCipipelineMetaForImageTags(a externalCi = false ciPipelineId = 0 appId = 0 - - ciPipeline, err := handler.ciPipelineRepository.GetCiPipelineByArtifactId(artifactId) - var externalCiPipeline *pipelineConfig.ExternalCiPipeline + ciArtifact, err := handler.ciArtifactRepository.Get(artifactId) if err != nil { - if err == pg.ErrNoRows { - handler.Logger.Infow("no ciPipeline found by artifact Id, fetching external ci-pipeline ", "artifactId", artifactId) - externalCiPipeline, err = handler.ciPipelineRepository.GetExternalCiPipelineByArtifactId(artifactId) - } + 
handler.Logger.Errorw("Error in fetching ci artifact by ci artifact id", "err", err) + return externalCi, ciPipelineId, appId, err + } + if ciArtifact.DataSource == repository.POST_CI { + ciPipelineId = ciArtifact.ComponentId + ciPipeline, err := handler.pipelineBuilder.GetCiPipelineById(ciPipelineId) if err != nil { - handler.Logger.Errorw("error occurred in fetching ciPipeline/externalCiPipeline by artifact Id ", "err", err, "artifactId", artifactId) + handler.Logger.Errorw("no ci pipeline found for given artifact", "err", err, "artifactId", artifactId, "ciPipelineId", ciPipelineId) return externalCi, ciPipelineId, appId, err } - } - - if ciPipeline.Id != 0 { - ciPipelineId = ciPipeline.Id appId = ciPipeline.AppId + } else if ciArtifact.DataSource == repository.PRE_CD || ciArtifact.DataSource == repository.POST_CD { + cdPipelineId := ciArtifact.ComponentId + cdPipeline, err := handler.pipelineBuilder.GetCdPipelineById(cdPipelineId) + if err != nil { + handler.Logger.Errorw("no cd pipeline found for given artifact", "err", err, "artifactId", artifactId, "cdPipelineId", cdPipelineId) + return externalCi, ciPipelineId, appId, err + } + ciPipelineId = cdPipeline.CiPipelineId + appId = cdPipeline.AppId } else { - externalCi = true - ciPipelineId = externalCiPipeline.Id - appId = externalCiPipeline.AppId + ciPipeline, err := handler.ciPipelineRepository.GetCiPipelineByArtifactId(artifactId) + var externalCiPipeline *pipelineConfig.ExternalCiPipeline + if err != nil { + if err == pg.ErrNoRows { + handler.Logger.Infow("no ciPipeline found by artifact Id, fetching external ci-pipeline ", "artifactId", artifactId) + externalCiPipeline, err = handler.ciPipelineRepository.GetExternalCiPipelineByArtifactId(artifactId) + } + if err != nil { + handler.Logger.Errorw("error occurred in fetching ciPipeline/externalCiPipeline by artifact Id ", "err", err, "artifactId", artifactId) + return externalCi, ciPipelineId, appId, err + } + } + if ciPipeline.Id != 0 { + ciPipelineId = 
ciPipeline.Id + appId = ciPipeline.AppId + } else { + externalCi = true + ciPipelineId = externalCiPipeline.Id + appId = externalCiPipeline.AppId + } } return externalCi, ciPipelineId, appId, nil } diff --git a/api/restHandler/app/PipelineConfigRestHandler.go b/api/restHandler/app/PipelineConfigRestHandler.go index 693bafedac..e020ceb95f 100644 --- a/api/restHandler/app/PipelineConfigRestHandler.go +++ b/api/restHandler/app/PipelineConfigRestHandler.go @@ -130,6 +130,7 @@ type PipelineConfigRestHandlerImpl struct { imageTaggingService pipeline.ImageTaggingService deploymentTemplateService generateManifest.DeploymentTemplateService pipelineRestHandlerEnvConfig *PipelineRestHandlerEnvConfig + ciArtifactRepository repository.CiArtifactRepository } func NewPipelineRestHandlerImpl(pipelineBuilder pipeline.PipelineBuilder, Logger *zap.SugaredLogger, @@ -153,7 +154,8 @@ func NewPipelineRestHandlerImpl(pipelineBuilder pipeline.PipelineBuilder, Logger materialRepository pipelineConfig.MaterialRepository, policyService security2.PolicyService, scanResultRepository security.ImageScanResultRepository, gitProviderRepo repository.GitProviderRepository, argoUserService argo.ArgoUserService, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, - imageTaggingService pipeline.ImageTaggingService) *PipelineConfigRestHandlerImpl { + imageTaggingService pipeline.ImageTaggingService, + ciArtifactRepository repository.CiArtifactRepository) *PipelineConfigRestHandlerImpl { envConfig := &PipelineRestHandlerEnvConfig{} err := env.Parse(envConfig) if err != nil { @@ -190,6 +192,7 @@ func NewPipelineRestHandlerImpl(pipelineBuilder pipeline.PipelineBuilder, Logger imageTaggingService: imageTaggingService, deploymentTemplateService: deploymentTemplateService, pipelineRestHandlerEnvConfig: envConfig, + ciArtifactRepository: ciArtifactRepository, } } diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 
f69f391ac0..fe535234f7 100644
--- a/internal/sql/repository/CiArtifactRepository.go
+++ b/internal/sql/repository/CiArtifactRepository.go
@@ -102,6 +102,7 @@ type CiArtifactRepository interface {
 	FetchArtifactsByCdPipelineIdV2(listingFilterOptions bean.ArtifactsListFilterOptions) ([]CiArtifactWithExtraData, int, error)
 	FindArtifactByListFilter(listingFilterOptions *bean.ArtifactsListFilterOptions) ([]CiArtifact, int, error)
 	GetArtifactsByDataSourceAndComponentId(dataSource string, componentId int) ([]CiArtifact, error)
+	FindCiArtifactByImagePaths(images []string) ([]CiArtifact, error)
 }
 
 type CiArtifactRepositoryImpl struct {
@@ -729,3 +730,16 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByDataSourceAndComponentId(data
 	}
 	return ciArtifacts, nil
 }
+
+func (impl CiArtifactRepositoryImpl) FindCiArtifactByImagePaths(images []string) ([]CiArtifact, error) {
+	var ciArtifacts []CiArtifact
+	err := impl.dbConnection.
+		Model(&ciArtifacts).
+		Where(" image in (?) ", pg.In(images)).
+		Select()
+	if err != nil && err != pg.ErrNoRows {
+		impl.logger.Errorw("error in getting ci artifacts by image paths")
+		return ciArtifacts, err
+	}
+	return ciArtifacts, nil
+}
diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go
index bb9de29cda..8f5b0d522c 100644
--- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go
+++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go
@@ -168,7 +168,7 @@ type CdWorkflowRunner struct {
 	PodName                 string `sql:"pod_name"`
 	BlobStorageEnabled      bool   `sql:"blob_storage_enabled,notnull"`
 	RefCdWorkflowRunnerId   int    `sql:"ref_cd_workflow_runner_id,notnull"`
-	ImagePathReservationIds []int  `sql:"image_path_reservation_ids,notnull" pg:",array"`
+	ImagePathReservationIds []int  `sql:"image_path_reservation_ids" pg:",array"`
 	CdWorkflow              *CdWorkflow
 	sql.AuditLog
 }
@@ -483,7 +483,7 @@ func (impl *CdWorkflowRepositoryImpl)
UpdateWorkFlowRunner(wfr *CdWorkflowRunner } func (impl *CdWorkflowRepositoryImpl) UpdateWorkFlowRunnersWithTxn(wfrs []*CdWorkflowRunner, tx *pg.Tx) error { - _, err := tx.Model(&wfrs).Update() + _, err := tx.Model(&wfrs).WherePK().Update() return err } diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 1b1059ac8c..11fcf368c0 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -644,17 +644,17 @@ func (impl *AppArtifactManagerImpl) setAdditionalDataInArtifacts(ciArtifacts []b ciArtifacts[i].ImageComment = imageCommentResp } var dockerRegistryId string - if ciArtifacts[i].CiPipelineId != 0 { + if ciArtifacts[i].DataSource == repository.POST_CI || ciArtifacts[i].DataSource == repository.PRE_CD || ciArtifacts[i].DataSource == repository.POST_CD { + if ciArtifacts[i].CredentialsSourceType == repository.GLOBAL_CONTAINER_REGISTRY { + dockerRegistryId = ciArtifacts[i].CredentialsSourceValue + } + } else { ciPipeline, err := impl.CiPipelineRepository.FindById(ciArtifacts[i].CiPipelineId) if err != nil { impl.logger.Errorw("error in fetching ciPipeline", "ciPipelineId", ciPipeline.Id, "error", err) return nil, err } dockerRegistryId = *ciPipeline.CiTemplate.DockerRegistryId - } else { - if ciArtifacts[i].CredentialsSourceType == repository.GLOBAL_CONTAINER_REGISTRY { - dockerRegistryId = ciArtifacts[i].CredentialsSourceValue - } } if len(dockerRegistryId) > 0 { dockerArtifact, err := impl.dockerArtifactRegistry.FindOne(dockerRegistryId) diff --git a/pkg/pipeline/CdHandler.go b/pkg/pipeline/CdHandler.go index e0b1ac8ecd..f7e7b6831e 100644 --- a/pkg/pipeline/CdHandler.go +++ b/pkg/pipeline/CdHandler.go @@ -787,6 +787,7 @@ func (impl *CdHandlerImpl) GetCdBuildHistory(appId int, environmentId int, pipel var newCiArtifactIds []int for _, ciArtifact := range ciArtifacts { if ciArtifact.ParentCiArtifact > 0 && ciArtifact.WorkflowId == nil { + // parent ci artifact ID can be greater than zero when 
pipeline is linked or when image is copied at plugin level from some other image isLinked = true newCiArtifactIds = append(newCiArtifactIds, ciArtifact.ParentCiArtifact) parentCiArtifact[ciArtifact.Id] = ciArtifact.ParentCiArtifact diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 832092cede..86312d71ed 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -761,6 +761,14 @@ func (impl *CiServiceImpl) GetWorkflowRequestVariablesForSkopeoPlugin(preCiSteps pluginArtifactStage = repository5.POST_CI } } + for _, images := range registryDestinationImageMap { + for _, image := range images { + if image == buildImagePath { + return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, + fmt.Errorf("source image cannot be same as destination image") + } + } + } imagePathReservationIds, err = impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) if err != nil { return nil, nil, pluginArtifactStage, imagePathReservationIds, err diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index 28c1a0c2c6..4e02e6909e 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -329,6 +329,7 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelineById(pipelineId int) (cdPi CustomTagObject: customTag, CustomTagStage: &customTagStage, EnableCustomTag: customTagEnabled, + AppId: dbPipeline.AppId, } var preDeployStage *bean3.PipelineStageDto var postDeployStage *bean3.PipelineStageDto @@ -418,11 +419,6 @@ func (impl *CdPipelineConfigServiceImpl) CreateCdPipelines(pipelineCreateRequest return nil, err } } - // save custom tag data - err = impl.CDPipelineCustomTagDBOperations(pipeline) - if err != nil { - return nil, err - } } return pipelineCreateRequest, nil } @@ -1726,7 +1722,11 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a } } 
- + // save custom tag data + err = impl.CDPipelineCustomTagDBOperations(pipeline) + if err != nil { + return pipelineId, err + } err = tx.Commit() if err != nil { return 0, err diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index a3d8d952d8..3928e6e87b 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -213,30 +213,7 @@ func (impl WebhookServiceImpl) HandleCiSuccessEvent(ciPipelineId int, request *C if !imagePushedAt.IsZero() { createdOn = *imagePushedAt } - var pluginArtifacts []*repository.CiArtifact - for registry, artifacts := range request.PluginRegistryArtifactDetails { - for _, image := range artifacts { - pluginArtifact := &repository.CiArtifact{ - Image: image, - ImageDigest: request.ImageDigest, - MaterialInfo: string(materialJson), - DataSource: request.PluginArtifactStage, - ComponentId: pipeline.Id, - PipelineId: pipeline.Id, - AuditLog: sql.AuditLog{CreatedBy: request.UserId, UpdatedBy: request.UserId, CreatedOn: createdOn, UpdatedOn: updatedOn}, - CredentialsSourceType: repository.GLOBAL_CONTAINER_REGISTRY, - CredentialSourceValue: registry, - } - pluginArtifacts = append(pluginArtifacts, pluginArtifact) - } - } - if len(pluginArtifacts) > 0 { - err = impl.ciArtifactRepository.SaveAll(pluginArtifacts) - if err != nil { - impl.logger.Errorw("error while saving ci artifacts", "err", err) - return 0, err - } - } + buildArtifact := &repository.CiArtifact{ Image: request.Image, ImageDigest: request.ImageDigest, @@ -259,7 +236,7 @@ func (impl WebhookServiceImpl) HandleCiSuccessEvent(ciPipelineId int, request *C impl.logger.Errorw("error in getting ci pipeline plugin", "err", err, "pipelineId", pipeline.Id, "pluginId", plugin[0].Id) return 0, err } - if pipeline.ScanEnabled || isScanPluginConfigured{ + if pipeline.ScanEnabled || isScanPluginConfigured { buildArtifact.Scanned = true buildArtifact.ScanEnabled = true } @@ -268,6 +245,34 @@ func (impl WebhookServiceImpl) 
HandleCiSuccessEvent(ciPipelineId int, request *C return 0, err } + var pluginArtifacts []*repository.CiArtifact + for registry, artifacts := range request.PluginRegistryArtifactDetails { + for _, image := range artifacts { + pluginArtifact := &repository.CiArtifact{ + Image: image, + ImageDigest: request.ImageDigest, + MaterialInfo: string(materialJson), + DataSource: request.PluginArtifactStage, + ComponentId: pipeline.Id, + PipelineId: pipeline.Id, + AuditLog: sql.AuditLog{CreatedBy: request.UserId, UpdatedBy: request.UserId, CreatedOn: createdOn, UpdatedOn: updatedOn}, + CredentialsSourceType: repository.GLOBAL_CONTAINER_REGISTRY, + CredentialSourceValue: registry, + ParentCiArtifact: buildArtifact.Id, + Scanned: buildArtifact.Scanned, + ScanEnabled: buildArtifact.ScanEnabled, + } + pluginArtifacts = append(pluginArtifacts, pluginArtifact) + } + } + if len(pluginArtifacts) > 0 { + err = impl.ciArtifactRepository.SaveAll(pluginArtifacts) + if err != nil { + impl.logger.Errorw("error while saving ci artifacts", "err", err) + return 0, err + } + } + childrenCi, err := impl.ciPipelineRepository.FindByParentCiPipelineId(ciPipelineId) if err != nil && !util2.IsErrNoRows(err) { impl.logger.Errorw("error while fetching childern ci ", "err", err) diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 17f2cfbc9c..3100185617 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -615,7 +615,12 @@ func (impl *WorkflowDagExecutorImpl) SavePluginArtifacts(ciArtifact *repository. 
for _, artifact := range saveArtifacts { PipelineArtifacts[artifact.Image] = true } - + var parentCiArtifactId int + if ciArtifact.ParentCiArtifact > 0 { + parentCiArtifactId = ciArtifact.ParentCiArtifact + } else { + parentCiArtifactId = ciArtifact.Id + } var CDArtifacts []*repository.CiArtifact for registry, artifacts := range pluginArtifactsDetail { // artifacts are list of images @@ -638,6 +643,7 @@ func (impl *WorkflowDagExecutorImpl) SavePluginArtifacts(ciArtifact *repository. UpdatedOn: time.Now(), UpdatedBy: DEVTRON_SYSTEM_USER_ID, }, + ParentCiArtifact: parentCiArtifactId, } CDArtifacts = append(CDArtifacts, pluginArtifact) } @@ -817,6 +823,22 @@ func (impl *WorkflowDagExecutorImpl) SetSkopeoPluginDataInWorkflowRequest(cdStag impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return imagePathReservationIds, err } + var destinationImages []string + for _, images := range registryDestinationImageMap { + for _, image := range images { + destinationImages = append(destinationImages, image) + } + } + // fetch already saved artifacts to check if they are already present + savedCIArtifacts, err := impl.ciArtifactRepository.FindCiArtifactByImagePaths(destinationImages) + if err != nil { + impl.logger.Errorw("error in fetching artifacts by image path", "err", err) + return imagePathReservationIds, err + } + if len(savedCIArtifacts) > 0 { + // if already present in ci artifact, return "image path already in use error" + return imagePathReservationIds, bean3.ErrImagePathInUse + } imagePathReservationIds, err = impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) if err != nil { impl.logger.Errorw("error in reserving image", "err", err) diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index 73ebcc36f4..45b487e4f3 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -61,6 +61,10 @@ func (impl 
*PluginInputVariableParserImpl) HandleSkopeoPluginInputVariable(input
 		return nil, nil, errors.New("no image provided during trigger time")
 	}
 
+	if len(DestinationInfo) == 0 {
+		return nil, nil, errors.New("destination info not provided")
+	}
+
 	if len(dockerImageTag) == 0 {
 		// case when custom tag is not configured - source image tag will be taken as docker image tag
 		pluginTriggerImageSplit := strings.Split(pluginTriggerImage, ":")
diff --git a/wire_gen.go b/wire_gen.go
index c4b2ec67f2..6bddf2ece0 100644
--- a/wire_gen.go
+++ b/wire_gen.go
@@ -528,7 +528,7 @@ func InitializeApp() (*App, error) {
 	imageScanObjectMetaRepositoryImpl := security.NewImageScanObjectMetaRepositoryImpl(db, sugaredLogger)
 	cveStoreRepositoryImpl := security.NewCveStoreRepositoryImpl(db, sugaredLogger)
 	policyServiceImpl := security2.NewPolicyServiceImpl(environmentServiceImpl, sugaredLogger, appRepositoryImpl, pipelineOverrideRepositoryImpl, cvePolicyRepositoryImpl, clusterServiceImplExtended, pipelineRepositoryImpl, imageScanResultRepositoryImpl, imageScanDeployInfoRepositoryImpl, imageScanObjectMetaRepositoryImpl, httpClient, ciArtifactRepositoryImpl, ciCdConfig, imageScanHistoryRepositoryImpl, cveStoreRepositoryImpl, ciTemplateRepositoryImpl)
-	pipelineConfigRestHandlerImpl := app3.NewPipelineRestHandlerImpl(pipelineBuilderImpl, sugaredLogger, chartServiceImpl, propertiesConfigServiceImpl, dbMigrationServiceImpl, applicationServiceClientImpl, userServiceImpl, teamServiceImpl, enforcerImpl, ciHandlerImpl, validate, clientImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, enforcerUtilImpl, environmentServiceImpl, gitRegistryConfigImpl, dockerRegistryConfigImpl, cdHandlerImpl, appCloneServiceImpl, deploymentTemplateServiceImpl, appWorkflowServiceImpl, materialRepositoryImpl, policyServiceImpl, imageScanResultRepositoryImpl, gitProviderRepositoryImpl, argoUserServiceImpl, ciPipelineMaterialRepositoryImpl, imageTaggingServiceImpl)
+	pipelineConfigRestHandlerImpl :=
app3.NewPipelineRestHandlerImpl(pipelineBuilderImpl, sugaredLogger, chartServiceImpl, propertiesConfigServiceImpl, dbMigrationServiceImpl, applicationServiceClientImpl, userServiceImpl, teamServiceImpl, enforcerImpl, ciHandlerImpl, validate, clientImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, enforcerUtilImpl, environmentServiceImpl, gitRegistryConfigImpl, dockerRegistryConfigImpl, cdHandlerImpl, appCloneServiceImpl, deploymentTemplateServiceImpl, appWorkflowServiceImpl, materialRepositoryImpl, policyServiceImpl, imageScanResultRepositoryImpl, gitProviderRepositoryImpl, argoUserServiceImpl, ciPipelineMaterialRepositoryImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl) appWorkflowRestHandlerImpl := restHandler.NewAppWorkflowRestHandlerImpl(sugaredLogger, userServiceImpl, appWorkflowServiceImpl, teamServiceImpl, enforcerImpl, pipelineBuilderImpl, appRepositoryImpl, enforcerUtilImpl) webhookEventDataRepositoryImpl := repository.NewWebhookEventDataRepositoryImpl(db) webhookEventDataConfigImpl := pipeline.NewWebhookEventDataConfigImpl(sugaredLogger, webhookEventDataRepositoryImpl) From 57d2eb14abe88239a49acce79dad3b13016f680c Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 17 Nov 2023 02:48:31 +0530 Subject: [PATCH 132/143] bug fixes --- api/restHandler/PipelineTriggerRestHandler.go | 2 +- pkg/pipeline/AppArtifactManager.go | 17 ++++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/api/restHandler/PipelineTriggerRestHandler.go b/api/restHandler/PipelineTriggerRestHandler.go index bd6235c478..0082f14324 100644 --- a/api/restHandler/PipelineTriggerRestHandler.go +++ b/api/restHandler/PipelineTriggerRestHandler.go @@ -133,7 +133,7 @@ func (handler PipelineTriggerRestHandlerImpl) OverrideConfig(w http.ResponseWrit span.End() if err != nil { handler.logger.Errorw("request err, OverrideConfig", "err", err, "payload", overrideRequest) - common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + 
common.WriteJsonResp(w, err, err, http.StatusInternalServerError) return } res := map[string]interface{}{"releaseId": mergeResp} diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 11fcf368c0..f840fa583e 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -836,13 +836,16 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsForCdStageV2(listingFilterOpts ImageDigest: artifact.ImageDigest, MaterialInfo: mInfo, //TODO:LastSuccessfulTriggerOnParent - Scanned: artifact.Scanned, - ScanEnabled: artifact.ScanEnabled, - RunningOnParentCd: artifact.Id == artifactRunningOnParentCd, - ExternalCiPipelineId: artifact.ExternalCiPipelineId, - ParentCiArtifact: artifact.ParentCiArtifact, - CreatedTime: formatDate(artifact.CreatedOn, bean2.LayoutRFC3339), - DataSource: artifact.DataSource, + Scanned: artifact.Scanned, + ScanEnabled: artifact.ScanEnabled, + RunningOnParentCd: artifact.Id == artifactRunningOnParentCd, + ExternalCiPipelineId: artifact.ExternalCiPipelineId, + ParentCiArtifact: artifact.ParentCiArtifact, + CreatedTime: formatDate(artifact.CreatedOn, bean2.LayoutRFC3339), + DataSource: artifact.DataSource, + CiPipelineId: artifact.PipelineId, + CredentialsSourceType: artifact.CredentialsSourceType, + CredentialsSourceValue: artifact.CredentialSourceValue, } if artifact.WorkflowId != nil { ciArtifact.CiWorkflowId = *artifact.WorkflowId From 079e310cd2bc55cb2f709d4cdbe1cbb88cadf0ef Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 17 Nov 2023 04:08:35 +0530 Subject: [PATCH 133/143] create ecr repo if not present --- pkg/pipeline/pipelineStageVariableParser.go | 30 +++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index 45b487e4f3..3fb8fb19b2 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -3,9 +3,12 @@ package pipeline 
import ( "errors" "fmt" + dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" + "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/devtron-labs/devtron/pkg/plugin" "github.com/go-pg/pg" + errors1 "github.com/juju/errors" "go.uber.org/zap" "strings" ) @@ -78,6 +81,12 @@ func (impl *PluginInputVariableParserImpl) HandleSkopeoPluginInputVariable(input } registryDestinationImageMap = impl.getRegistryDestinationImageMapping(registryRepoMapping, dockerImageTag, registryCredentials) + err = impl.createEcrRepoIfRequired(registryCredentials, registryRepoMapping) + if err != nil { + impl.logger.Errorw("error in creating ecr repo", "err", err) + return registryDestinationImageMap, registryCredentials, err + } + return registryDestinationImageMap, registryCredentials, nil } @@ -164,3 +173,24 @@ func (impl *PluginInputVariableParserImpl) getRegistryDestinationImageMapping( return registryDestinationImageMapping } + +func (impl *PluginInputVariableParserImpl) createEcrRepoIfRequired(registryCredentials map[string]plugin.RegistryCredentials, registryRepoMapping map[string][]string) error { + for registry, registryCredential := range registryCredentials { + if registryCredential.RegistryType == dockerRegistryRepository.REGISTRYTYPE_ECR { + repositories := registryRepoMapping[registry] + for _, dockerRepo := range repositories { + err := util.CreateEcrRepo(dockerRepo, registryCredential.AWSRegion, registryCredential.AWSAccessKeyId, registryCredential.AWSSecretAccessKey) + if err != nil { + if errors1.IsAlreadyExists(err) { + impl.logger.Warnw("this repo already exists!!, skipping repo creation", "repo", dockerRepo) + } else { + impl.logger.Errorw("ecr repo creation failed, it might be due to authorization or any other external "+ + "dependency. 
please create repo manually before triggering ci", "repo", dockerRepo, "err", err) + return err + } + } + } + } + } + return nil +} From 3f5129d86966f4434d73a4eaa5dcb4c0b04ad940 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 17 Nov 2023 14:20:52 +0530 Subject: [PATCH 134/143] big fixes --- .../pipelineConfig/CdWorfkflowRepository.go | 16 +++++++++---- pkg/pipeline/WebhookService.go | 23 +++++++++++++++++++ pkg/pipeline/WorkflowDagExecutor.go | 2 +- 3 files changed, 35 insertions(+), 6 deletions(-) diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 8f5b0d522c..c98c749305 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -168,7 +168,7 @@ type CdWorkflowRunner struct { PodName string `sql:"pod_name"` BlobStorageEnabled bool `sql:"blob_storage_enabled,notnull"` RefCdWorkflowRunnerId int `sql:"ref_cd_workflow_runner_id,notnull"` - ImagePathReservationIds []int `sql:"image_path_reservation_ids" pg:",array"` + ImagePathReservationIds []int `sql:"image_path_reservation_ids" pg:",array,notnull"` CdWorkflow *CdWorkflow sql.AuditLog } @@ -483,13 +483,19 @@ func (impl *CdWorkflowRepositoryImpl) UpdateWorkFlowRunner(wfr *CdWorkflowRunner } func (impl *CdWorkflowRepositoryImpl) UpdateWorkFlowRunnersWithTxn(wfrs []*CdWorkflowRunner, tx *pg.Tx) error { - _, err := tx.Model(&wfrs).WherePK().Update() + _, err := tx.Model(&wfrs).Update() return err } -func (impl *CdWorkflowRepositoryImpl) UpdateWorkFlowRunners(wfr []*CdWorkflowRunner) error { - _, err := impl.dbConnection.Model(&wfr).Update() - return err +func (impl *CdWorkflowRepositoryImpl) UpdateWorkFlowRunners(wfrs []*CdWorkflowRunner) error { + for _, wfr := range wfrs { + err := impl.dbConnection.Update(wfr) + if err != nil { + impl.logger.Errorw("error in updating wfr", "err", err) + return err + } + } + return nil } 
func (impl *CdWorkflowRepositoryImpl) FindWorkflowRunnerByCdWorkflowId(wfIds []int) ([]*CdWorkflowRunner, error) { var wfr []*CdWorkflowRunner diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index 3928e6e87b..f2f5c8176a 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -296,6 +296,29 @@ func (impl WebhookServiceImpl) HandleCiSuccessEvent(ciPipelineId int, request *C if ci.ScanEnabled { ciArtifact.Scanned = true } + for registry, artifacts := range request.PluginRegistryArtifactDetails { + for _, image := range artifacts { + pluginArtifact := &repository.CiArtifact{ + Image: image, + ImageDigest: request.ImageDigest, + MaterialInfo: string(materialJson), + DataSource: request.PluginArtifactStage, + ComponentId: ci.Id, + PipelineId: ci.Id, + AuditLog: sql.AuditLog{CreatedBy: request.UserId, UpdatedBy: request.UserId, CreatedOn: createdOn, UpdatedOn: updatedOn}, + CredentialsSourceType: repository.GLOBAL_CONTAINER_REGISTRY, + CredentialSourceValue: registry, + ParentCiArtifact: buildArtifact.Id, + ScanEnabled: ci.ScanEnabled, + } + pluginArtifacts = append(pluginArtifacts, pluginArtifact) + } + } + if len(pluginArtifacts) == 0 { + ciArtifactArr = append(ciArtifactArr, buildArtifact) + } else { + ciArtifactArr = append(ciArtifactArr, pluginArtifacts[0]) + } ciArtifactArr = append(ciArtifactArr, ciArtifact) } diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 3100185617..8e401350ba 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -1859,7 +1859,7 @@ func (impl *WorkflowDagExecutorImpl) updatePreviousDeploymentStatus(currentRunne timelines = append(timelines, timeline) } - err = impl.cdWorkflowRepository.UpdateWorkFlowRunnersWithTxn(previousNonTerminalRunners, tx) + err = impl.cdWorkflowRepository.UpdateWorkFlowRunners(previousNonTerminalRunners) if err != nil { impl.logger.Errorw("error updating cd wf runner status", "err", 
err, "previousNonTerminalRunners", previousNonTerminalRunners) return err From ebae885b95448222b8225f47d19f01fa059365eb Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 17 Nov 2023 14:38:43 +0530 Subject: [PATCH 135/143] sql script correction --- scripts/sql/189_skopeo_plugin.down.sql | 6 ++++++ scripts/sql/190_custom_tag.down.sql | 7 +++++++ 2 files changed, 13 insertions(+) diff --git a/scripts/sql/189_skopeo_plugin.down.sql b/scripts/sql/189_skopeo_plugin.down.sql index e69de29bb2..acd75e6427 100644 --- a/scripts/sql/189_skopeo_plugin.down.sql +++ b/scripts/sql/189_skopeo_plugin.down.sql @@ -0,0 +1,6 @@ +DELETE FROM plugin_step_variable WHERE plugin_step_id =(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false); +DELETE FROM plugin_step WHERE plugin_id=(SELECT id FROM plugin_metadata WHERE name='Skopeo'); +DELETE FROM plugin_stage_mapping WHERE plugin_id =(SELECT id FROM plugin_metadata WHERE name='Skopeo'); +DELETE FROM pipeline_stage_step_variable WHERE pipeline_stage_step_id in (SELECT id FROM pipeline_stage_step where ref_plugin_id =(SELECT id from plugin_metadata WHERE name ='Skopeo')); +DELETE FROM pipeline_stage_step where ref_plugin_id in (SELECT id from plugin_metadata WHERE name ='Skopeo'); +DELETE FROM plugin_metadata WHERE name ='Skopeo'; diff --git a/scripts/sql/190_custom_tag.down.sql b/scripts/sql/190_custom_tag.down.sql index e69de29bb2..f7a9862c5b 100644 --- a/scripts/sql/190_custom_tag.down.sql +++ b/scripts/sql/190_custom_tag.down.sql @@ -0,0 +1,7 @@ +ALTER TABLE custom_tag DROP COLUMN enabled; +ALTER TABLE ci_artifact DROP COLUMN credentials_source_type ; +ALTER TABLE ci_artifact DROP COLUMN credentials_source_value ; +ALTER TABLE ci_artifact DROP COLUMN component_id; +ALTER TABLE ci_workflow DROP COLUMN image_path_reservation_ids; +ALTER TABLE cd_workflow_runner DROP COLUMN image_path_reservation_ids; +ALTER TABLE image_path_reservation DROP 
CONSTRAINT image_path_reservation_custom_tag_id_fkey; \ No newline at end of file From e328c05974c2b5d70dea0c94ea10a03a41676b5b Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 17 Nov 2023 18:35:05 +0530 Subject: [PATCH 136/143] bug fixes --- api/restHandler/PipelineTriggerRestHandler.go | 2 +- api/router/ApplicationRouter.go | 3 +- .../CiArtifactsListingQueryBuilder.go | 5 ++-- pkg/pipeline/WebhookService.go | 23 --------------- pkg/pipeline/WorkflowDagExecutor.go | 29 +++++++++++++------ 5 files changed, 26 insertions(+), 36 deletions(-) diff --git a/api/restHandler/PipelineTriggerRestHandler.go b/api/restHandler/PipelineTriggerRestHandler.go index 0082f14324..bf34401970 100644 --- a/api/restHandler/PipelineTriggerRestHandler.go +++ b/api/restHandler/PipelineTriggerRestHandler.go @@ -133,7 +133,7 @@ func (handler PipelineTriggerRestHandlerImpl) OverrideConfig(w http.ResponseWrit span.End() if err != nil { handler.logger.Errorw("request err, OverrideConfig", "err", err, "payload", overrideRequest) - common.WriteJsonResp(w, err, err, http.StatusInternalServerError) + common.WriteJsonResp(w, err, err.Error(), http.StatusInternalServerError) return } res := map[string]interface{}{"releaseId": mergeResp} diff --git a/api/router/ApplicationRouter.go b/api/router/ApplicationRouter.go index 789a92c675..729f7f5242 100644 --- a/api/router/ApplicationRouter.go +++ b/api/router/ApplicationRouter.go @@ -79,7 +79,8 @@ func (r ApplicationRouterImpl) initApplicationRouter(router *mux.Router) { router.Path("/{applicationName}/managed-resources"). Methods("GET"). HandlerFunc(r.handler.ManagedResources) - router.Path("/{name}/rollback"). + router.Path("/{name}" + + "/rollback"). Methods("GET"). 
HandlerFunc(r.handler.Rollback) diff --git a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go index e176c61f54..9c607d70e8 100644 --- a/internal/sql/repository/CiArtifactsListingQueryBuilder.go +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -5,6 +5,7 @@ import ( "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository/helper" ) + const EmptyLikeRegex = "%%" func BuildQueryForParentTypeCIOrWebhook(listingFilterOpts bean.ArtifactsListFilterOptions) string { @@ -80,9 +81,9 @@ func BuildQueryForArtifactsForRollback(listingFilterOptions bean.ArtifactsListFi " INNER JOIN ci_artifact cia ON cia.id=cdw.ci_artifact_id " + " WHERE cdw.pipeline_id=%v AND cdwr.workflow_type = '%v' " if listingFilterOptions.SearchString != EmptyLikeRegex { - commonQuery += " AND cia.image LIKE '%v' " + commonQuery += fmt.Sprintf(" AND cia.image LIKE '%v' ", listingFilterOptions.SearchString) } - commonQuery = fmt.Sprintf(commonQuery, listingFilterOptions.PipelineId, listingFilterOptions.StageType, listingFilterOptions.SearchString) + commonQuery = fmt.Sprintf(commonQuery, listingFilterOptions.PipelineId, listingFilterOptions.StageType) if len(listingFilterOptions.ExcludeWfrIds) > 0 { commonQuery = fmt.Sprintf(" %s AND cdwr.id NOT IN (%s)", commonQuery, helper.GetCommaSepratedString(listingFilterOptions.ExcludeWfrIds)) } diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index f2f5c8176a..3928e6e87b 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -296,29 +296,6 @@ func (impl WebhookServiceImpl) HandleCiSuccessEvent(ciPipelineId int, request *C if ci.ScanEnabled { ciArtifact.Scanned = true } - for registry, artifacts := range request.PluginRegistryArtifactDetails { - for _, image := range artifacts { - pluginArtifact := &repository.CiArtifact{ - Image: image, - 
ImageDigest: request.ImageDigest, - MaterialInfo: string(materialJson), - DataSource: request.PluginArtifactStage, - ComponentId: ci.Id, - PipelineId: ci.Id, - AuditLog: sql.AuditLog{CreatedBy: request.UserId, UpdatedBy: request.UserId, CreatedOn: createdOn, UpdatedOn: updatedOn}, - CredentialsSourceType: repository.GLOBAL_CONTAINER_REGISTRY, - CredentialSourceValue: registry, - ParentCiArtifact: buildArtifact.Id, - ScanEnabled: ci.ScanEnabled, - } - pluginArtifacts = append(pluginArtifacts, pluginArtifact) - } - } - if len(pluginArtifacts) == 0 { - ciArtifactArr = append(ciArtifactArr, buildArtifact) - } else { - ciArtifactArr = append(ciArtifactArr, pluginArtifacts[0]) - } ciArtifactArr = append(ciArtifactArr, ciArtifact) } diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 8e401350ba..940e0edb73 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -797,20 +797,31 @@ func (impl *WorkflowDagExecutorImpl) SetSkopeoPluginDataInWorkflowRequest(cdStag } else { pipelineStageEntityType = bean3.EntityTypePostCD } - // for Skopeo plugin parse destination images and save its data in image path reservation table - customTagDbObject, customDockerImageTag, err := impl.customTagService.GetCustomTag(pipelineStageEntityType, strconv.Itoa(pipelineId)) + customTagId := -1 + var DockerImageTag string + + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pipelineStageEntityType, strconv.Itoa(pipelineId)) if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) + impl.logger.Errorw("error in fetching custom tag data", "err", err) return imagePathReservationIds, err } - var customTagId int - if customTagDbObject != nil && customTagDbObject.Id > 0 { - customTagId = customTagDbObject.Id + + if !customTag.Enabled { + DockerImageTag = "" } else { - customTagId = -1 + // for Skopeo plugin parse 
destination images and save its data in image path reservation table + customTagDbObject, customDockerImageTag, err := impl.customTagService.GetCustomTag(pipelineStageEntityType, strconv.Itoa(pipelineId)) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) + return imagePathReservationIds, err + } + if customTagDbObject != nil && customTagDbObject.Id > 0 { + customTagId = customTagDbObject.Id + } + DockerImageTag = customDockerImageTag } - var sourceDockerRegistryId string + var sourceDockerRegistryId string if artifact.DataSource == repository.PRE_CD || artifact.DataSource == repository.POST_CD || artifact.DataSource == repository.POST_CI { if artifact.CredentialsSourceType == repository.GLOBAL_CONTAINER_REGISTRY { sourceDockerRegistryId = artifact.CredentialSourceValue @@ -818,7 +829,7 @@ func (impl *WorkflowDagExecutorImpl) SetSkopeoPluginDataInWorkflowRequest(cdStag } else { sourceDockerRegistryId = cdStageWorkflowRequest.DockerRegistryId } - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, customDockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, DockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing skopeo input variable", "err", err) return imagePathReservationIds, err From 542072a1e480e5b35204383a9ee407824cc43f16 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Fri, 17 Nov 2023 19:52:19 +0530 Subject: [PATCH 137/143] renaming skopeo to copy container image --- pkg/pipeline/CiService.go | 27 ++++++++++----------- pkg/pipeline/WebhookService.go | 2 +- pkg/pipeline/WorkflowDagExecutor.go | 20 +++++++-------- 
pkg/pipeline/pipelineStageVariableParser.go | 13 +++++----- scripts/sql/189_skopeo_plugin.down.sql | 12 ++++----- scripts/sql/189_skopeo_plugin.up.sql | 16 ++++++------ 6 files changed, 44 insertions(+), 46 deletions(-) diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index c30e0f7928..78efba7280 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -486,12 +486,12 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. dockerImageTag = impl.buildImageTag(commitHashes, pipeline.Id, savedWf.Id) } - // skopeo plugin specific logic - registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imageReservationIds, err := impl.GetWorkflowRequestVariablesForSkopeoPlugin( + // copyContainerImage plugin specific logic + registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imageReservationIds, err := impl.GetWorkflowRequestVariablesForCopyContainerImagePlugin( preCiSteps, postCiSteps, dockerImageTag, customTag.Id, fmt.Sprintf(bean2.ImagePathPattern, pipeline.CiTemplate.DockerRegistry.RegistryURL, pipeline.CiTemplate.DockerRepository, dockerImageTag), pipeline.CiTemplate.DockerRegistry.Id) if err != nil { - impl.Logger.Errorw("error in getting env variables for skopeo plugin") + impl.Logger.Errorw("error in getting env variables for copyContainerImage plugin") savedWf.Status = pipelineConfig.WorkflowFailed savedWf.Message = err.Error() err1 := impl.ciWorkflowRepository.UpdateWorkFlow(savedWf) @@ -502,7 +502,6 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. } savedWf.ImagePathReservationIds = append(savedWf.ImagePathReservationIds, imageReservationIds...) - // skopeo plugin logic ends if ciWorkflowConfig.CiCacheBucket == "" { ciWorkflowConfig.CiCacheBucket = impl.config.DefaultCacheBucket @@ -733,28 +732,28 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
return workflowRequest, nil } -func (impl *CiServiceImpl) GetWorkflowRequestVariablesForSkopeoPlugin(preCiSteps []*bean2.StepObject, postCiSteps []*bean2.StepObject, customTag string, customTagId int, buildImagePath string, buildImagedockerRegistryId string) (map[string][]string, map[string]plugin.RegistryCredentials, string, []int, error) { +func (impl *CiServiceImpl) GetWorkflowRequestVariablesForCopyContainerImagePlugin(preCiSteps []*bean2.StepObject, postCiSteps []*bean2.StepObject, customTag string, customTagId int, buildImagePath string, buildImagedockerRegistryId string) (map[string][]string, map[string]plugin.RegistryCredentials, string, []int, error) { var registryDestinationImageMap map[string][]string var registryCredentialMap map[string]plugin.RegistryCredentials var pluginArtifactStage string var imagePathReservationIds []int - skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) + copyContainerImagePluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(COPY_CONTAINER_IMAGE) if err != nil && err != pg.ErrNoRows { - impl.Logger.Errorw("error in getting skopeo plugin id", "err", err) + impl.Logger.Errorw("error in getting copyContainerImage plugin id", "err", err) return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, err } for _, step := range preCiSteps { - if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { - // for Skopeo plugin parse destination images and save its data in image path reservation table - return nil, nil, pluginArtifactStage, nil, errors.New("skopeo plugin not allowed in pre-ci step, please remove it and try again") + if copyContainerImagePluginId != 0 && step.RefPluginId == copyContainerImagePluginId { + // for copyContainerImage plugin parse destination images and save its data in image path reservation table + return nil, nil, pluginArtifactStage, nil, errors.New("copyContainerImage plugin not allowed in pre-ci 
step, please remove it and try again") } } for _, step := range postCiSteps { - if skopeoRefPluginId != 0 && step.RefPluginId == skopeoRefPluginId { - // for Skopeo plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, customTag, buildImagePath, buildImagedockerRegistryId) + if copyContainerImagePluginId != 0 && step.RefPluginId == copyContainerImagePluginId { + // for copyContainerImage plugin parse destination images and save its data in image path reservation table + registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.HandleCopyContainerImagePluginInputVariables(step.InputVars, customTag, buildImagePath, buildImagedockerRegistryId) if err != nil { - impl.Logger.Errorw("error in parsing skopeo input variable", "err", err) + impl.Logger.Errorw("error in parsing copyContainerImage input variable", "err", err) return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, err } pluginArtifactStage = repository5.POST_CI diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index 3928e6e87b..23f97c12c5 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -53,7 +53,7 @@ type CiArtifactWebhookRequest struct { UserId int32 `json:"userId"` IsArtifactUploaded bool `json:"isArtifactUploaded"` FailureReason string `json:"failureReason"` - PluginRegistryArtifactDetails map[string][]string `json:"PluginRegistryArtifactDetails"` //map of registry and array of images generated by skopeo plugin + PluginRegistryArtifactDetails map[string][]string `json:"PluginRegistryArtifactDetails"` //map of registry and array of images generated by Copy container image plugin PluginArtifactStage string `json:"pluginArtifactStage"` // at which stage of CI artifact was generated by plugin 
("pre_ci/post_ci") } diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 940e0edb73..9f849201d0 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -749,8 +749,8 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * return err } cdStageWorkflowRequest.StageType = types.PRE - // handling skopeo plugin specific logic - imagePathReservationIds, err := impl.SetSkopeoPluginDataInWorkflowRequest(cdStageWorkflowRequest, pipeline.Id, types.PRE, artifact) + // handling copyContainerImage plugin specific logic + imagePathReservationIds, err := impl.SetCopyContainerImagePluginDataInWorkflowRequest(cdStageWorkflowRequest, pipeline.Id, types.PRE, artifact) if err != nil { runner.Status = pipelineConfig.WorkflowFailed runner.Message = err.Error() @@ -782,15 +782,15 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * return nil } -func (impl *WorkflowDagExecutorImpl) SetSkopeoPluginDataInWorkflowRequest(cdStageWorkflowRequest *types.WorkflowRequest, pipelineId int, pipelineStage string, artifact *repository.CiArtifact) ([]int, error) { - skopeoRefPluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(SKOPEO) +func (impl *WorkflowDagExecutorImpl) SetCopyContainerImagePluginDataInWorkflowRequest(cdStageWorkflowRequest *types.WorkflowRequest, pipelineId int, pipelineStage string, artifact *repository.CiArtifact) ([]int, error) { + copyContainerImagePluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(COPY_CONTAINER_IMAGE) var imagePathReservationIds []int if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in getting skopeo plugin id", "err", err) + impl.logger.Errorw("error in getting copyContainerImage plugin id", "err", err) return imagePathReservationIds, err } for _, step := range cdStageWorkflowRequest.PrePostDeploySteps { - if skopeoRefPluginId != 0 && step.RefPluginId == 
skopeoRefPluginId { + if copyContainerImagePluginId != 0 && step.RefPluginId == copyContainerImagePluginId { var pipelineStageEntityType int if pipelineStage == types.PRE { pipelineStageEntityType = bean3.EntityTypePreCD @@ -809,7 +809,7 @@ func (impl *WorkflowDagExecutorImpl) SetSkopeoPluginDataInWorkflowRequest(cdStag if !customTag.Enabled { DockerImageTag = "" } else { - // for Skopeo plugin parse destination images and save its data in image path reservation table + // for copyContainerImage plugin parse destination images and save its data in image path reservation table customTagDbObject, customDockerImageTag, err := impl.customTagService.GetCustomTag(pipelineStageEntityType, strconv.Itoa(pipelineId)) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) @@ -829,9 +829,9 @@ func (impl *WorkflowDagExecutorImpl) SetSkopeoPluginDataInWorkflowRequest(cdStag } else { sourceDockerRegistryId = cdStageWorkflowRequest.DockerRegistryId } - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleSkopeoPluginInputVariable(step.InputVars, DockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) + registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleCopyContainerImagePluginInputVariables(step.InputVars, DockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) if err != nil { - impl.logger.Errorw("error in parsing skopeo input variable", "err", err) + impl.logger.Errorw("error in parsing copyContainerImage input variable", "err", err) return imagePathReservationIds, err } var destinationImages []string @@ -969,7 +969,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerPostStage(cdWf *pipelineConfig.CdWor cdStageWorkflowRequest.Type = bean3.CD_WORKFLOW_PIPELINE_TYPE // handling plugin specific logic - pluginImagePathReservationIds, err := 
impl.SetSkopeoPluginDataInWorkflowRequest(cdStageWorkflowRequest, pipeline.Id, types.POST, cdWf.CiArtifact) + pluginImagePathReservationIds, err := impl.SetCopyContainerImagePluginDataInWorkflowRequest(cdStageWorkflowRequest, pipeline.Id, types.POST, cdWf.CiArtifact) if err != nil { runner.Status = pipelineConfig.WorkflowFailed runner.Message = err.Error() diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index 3fb8fb19b2..68995b3b6a 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -13,21 +13,20 @@ import ( "strings" ) -type SkopeoInputVariable = string +type copyContainerImagePluginInputVariable = string type RefPluginName = string const ( - SKOPEO RefPluginName = "Skopeo" + COPY_CONTAINER_IMAGE RefPluginName = "Copy container image" ) const ( - DESTINATION_INFO SkopeoInputVariable = "DESTINATION_INFO" - SOURCE_INFO SkopeoInputVariable = "SOURCE_INFO" - SOURCE_REGISTRY_CREDENTIALS_KEY = "SOURCE_REGISTRY_CREDENTIAL" + DESTINATION_INFO copyContainerImagePluginInputVariable = "DESTINATION_INFO" + SOURCE_REGISTRY_CREDENTIALS_KEY = "SOURCE_REGISTRY_CREDENTIAL" ) type PluginInputVariableParser interface { - HandleSkopeoPluginInputVariable(inputVariables []*bean.VariableObject, dockerImageTag string, pluginTriggerImage string, sourceImageDockerRegistry string) (registryDestinationImageMap map[string][]string, registryCredentials map[string]plugin.RegistryCredentials, err error) + HandleCopyContainerImagePluginInputVariables(inputVariables []*bean.VariableObject, dockerImageTag string, pluginTriggerImage string, sourceImageDockerRegistry string) (registryDestinationImageMap map[string][]string, registryCredentials map[string]plugin.RegistryCredentials, err error) } type PluginInputVariableParserImpl struct { @@ -48,7 +47,7 @@ func NewPluginInputVariableParserImpl( } } -func (impl *PluginInputVariableParserImpl) HandleSkopeoPluginInputVariable(inputVariables 
[]*bean.VariableObject, +func (impl *PluginInputVariableParserImpl) HandleCopyContainerImagePluginInputVariables(inputVariables []*bean.VariableObject, dockerImageTag string, pluginTriggerImage string, sourceImageDockerRegistry string) (registryDestinationImageMap map[string][]string, registryCredentials map[string]plugin.RegistryCredentials, err error) { diff --git a/scripts/sql/189_skopeo_plugin.down.sql b/scripts/sql/189_skopeo_plugin.down.sql index acd75e6427..140cdf9cf3 100644 --- a/scripts/sql/189_skopeo_plugin.down.sql +++ b/scripts/sql/189_skopeo_plugin.down.sql @@ -1,6 +1,6 @@ -DELETE FROM plugin_step_variable WHERE plugin_step_id =(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false); -DELETE FROM plugin_step WHERE plugin_id=(SELECT id FROM plugin_metadata WHERE name='Skopeo'); -DELETE FROM plugin_stage_mapping WHERE plugin_id =(SELECT id FROM plugin_metadata WHERE name='Skopeo'); -DELETE FROM pipeline_stage_step_variable WHERE pipeline_stage_step_id in (SELECT id FROM pipeline_stage_step where ref_plugin_id =(SELECT id from plugin_metadata WHERE name ='Skopeo')); -DELETE FROM pipeline_stage_step where ref_plugin_id in (SELECT id from plugin_metadata WHERE name ='Skopeo'); -DELETE FROM plugin_metadata WHERE name ='Skopeo'; +DELETE FROM plugin_step_variable WHERE plugin_step_id =(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Copy container image' and ps."index"=1 and ps.deleted=false); +DELETE FROM plugin_step WHERE plugin_id=(SELECT id FROM plugin_metadata WHERE name='Copy container image'); +DELETE FROM plugin_stage_mapping WHERE plugin_id =(SELECT id FROM plugin_metadata WHERE name='Copy container image'); +DELETE FROM pipeline_stage_step_variable WHERE pipeline_stage_step_id in (SELECT id FROM pipeline_stage_step where ref_plugin_id =(SELECT id from plugin_metadata WHERE name ='Copy container image')); +DELETE 
FROM pipeline_stage_step where ref_plugin_id in (SELECT id from plugin_metadata WHERE name ='Copy container image'); +DELETE FROM plugin_metadata WHERE name ='Copy container image'; diff --git a/scripts/sql/189_skopeo_plugin.up.sql b/scripts/sql/189_skopeo_plugin.up.sql index f72021d872..09de47f546 100644 --- a/scripts/sql/189_skopeo_plugin.up.sql +++ b/scripts/sql/189_skopeo_plugin.up.sql @@ -1,30 +1,30 @@ INSERT INTO "plugin_metadata" ("id", "name", "description","type","icon","deleted", "created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_metadata'), 'Skopeo','','PRESET','','f', 'now()', 1, 'now()', 1); +VALUES (nextval('id_seq_plugin_metadata'), 'Copy container image','Copy container images from the source repository to a desired repository','PRESET','','f', 'now()', 1, 'now()', 1); INSERT INTO "plugin_tag_relation" ("id", "tag_id", "plugin_id", "created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_tag_relation'),(SELECT id FROM plugin_tag WHERE name='CI task') , (SELECT id FROM plugin_metadata WHERE name='Skopeo'),'now()', 1, 'now()', 1); +VALUES (nextval('id_seq_plugin_tag_relation'),(SELECT id FROM plugin_tag WHERE name='Image source') , (SELECT id FROM plugin_metadata WHERE name='Copy container image'),'now()', 1, 'now()', 1); INSERT INTO "plugin_stage_mapping" ("plugin_id","stage_type","created_on", "created_by", "updated_on", "updated_by") -VALUES ((SELECT id FROM plugin_metadata WHERE name='Skopeo'),0,'now()', 1, 'now()', 1); +VALUES ((SELECT id FROM plugin_metadata WHERE name='Copy container image'),0,'now()', 1, 'now()', 1); INSERT INTO "plugin_pipeline_script" ("id","type","mount_directory_from_host","container_image_path","deleted","created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_pipeline_script'),'CONTAINER_IMAGE','t','quay.io/devtron/test:8d5a6d8d-81-1031','f','now()',1,'now()',1); INSERT INTO "plugin_step" ("id", 
"plugin_id","name","description","index","step_type","script_id","deleted", "created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM plugin_metadata WHERE name='Skopeo'),'Step 1','Step 1 - Copy container images','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1); +VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM plugin_metadata WHERE name='Copy container image'),'Step 1','Step 1 - Copy container images','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1); INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index", "deleted", "created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'DESTINATION_INFO','STRING', +VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Copy container image' and ps."index"=1 and ps.deleted=false), 'DESTINATION_INFO','STRING', 'In case of CI, build image will be copied to registry and repository provided in DESTINATION_INFO. 
In case of PRE-CD/POST-CD, Image used to trigger stage will be copied in DESTINATION_INFO Format: | ,', ,true,false,'INPUT','NEW',1 ,'f','now()', 1, 'now()', 1); INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'DOCKER_IMAGE','STRING','',false,true,'INPUT','GLOBAL',1 ,'DOCKER_IMAGE','f','now()', 1, 'now()', 1); +VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Copy container image' and ps."index"=1 and ps.deleted=false), 'DOCKER_IMAGE','STRING','',false,true,'INPUT','GLOBAL',1 ,'DOCKER_IMAGE','f','now()', 1, 'now()', 1); INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'REGISTRY_DESTINATION_IMAGE_MAP','STRING','map of registry name and images needed to be copied in that images',false,true,'INPUT','GLOBAL',1 ,'REGISTRY_DESTINATION_IMAGE_MAP','f','now()', 1, 'now()', 1); +VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Copy container image' and ps."index"=1 and ps.deleted=false), 'REGISTRY_DESTINATION_IMAGE_MAP','STRING','map of registry name and images 
needed to be copied in that images',false,true,'INPUT','GLOBAL',1 ,'REGISTRY_DESTINATION_IMAGE_MAP','f','now()', 1, 'now()', 1); INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Skopeo' and ps."index"=1 and ps.deleted=false), 'REGISTRY_CREDENTIALS','STRING','',false,true,'INPUT','GLOBAL',1 ,'REGISTRY_CREDENTIALS','f','now()', 1, 'now()', 1); +VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Copy container image' and ps."index"=1 and ps.deleted=false), 'REGISTRY_CREDENTIALS','STRING','',false,true,'INPUT','GLOBAL',1 ,'REGISTRY_CREDENTIALS','f','now()', 1, 'now()', 1); From 7e6d3cfc571e13386887761880f4ec9908833dce Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 20 Nov 2023 10:09:56 +0530 Subject: [PATCH 138/143] sql script update --- assets/ic-plugin-copy-container-image.png | Bin 0 -> 682 bytes ...own.sql => 189_copy_container_images.down.sql} | 0 ...in.up.sql => 189_copy_container_images.up.sql} | 5 ++--- 3 files changed, 2 insertions(+), 3 deletions(-) create mode 100644 assets/ic-plugin-copy-container-image.png rename scripts/sql/{189_skopeo_plugin.down.sql => 189_copy_container_images.down.sql} (100%) rename scripts/sql/{189_skopeo_plugin.up.sql => 189_copy_container_images.up.sql} (94%) diff --git a/assets/ic-plugin-copy-container-image.png b/assets/ic-plugin-copy-container-image.png new file mode 100644 index 0000000000000000000000000000000000000000..1b5a3c47e59d4ca7c92c6905c2956c57a96f31b1 GIT binary patch literal 682 
zcmV;b0#*HqP)h?%mCNfmez7)PbN|_BjEG1lT8i=X_fvbJ;_2@_hq}k)^`y+@OPbDgoWK9-t~7Y7 zCuNs@flt(|BaXn|hOW$Pm%EIgqSMNpMR1zp(Upvwlj6yCL}YAJY-O5{K&h5F)#~?= z#pa*H-k-$Xin`i`sKx*9!kVh8VTP$pe5I+eqVd^{x3r9!qKjdEiyc*n)5CrzR(tW$ zZ=vh{O#lD@2Xs|^i#eNLcfq$Di_lTcGU0FL$z~`FzdJn?gsqTp)JMA@im1=| zOQ`gnVsXRCRfH;C!b67F6R69&pbJh{pfArp=(?f;Mauwcwr#h71SiI4ncL*rJWf2L z1Z4rl_!kKe%5vix(vXHU6j*5Og=}$Zzh1~Ups56%R~=E0@-B(4Wb49}sgA^rY>(V( zMM8o6uK9(mu(3W9EU8a;kb%ACL3=xf2MJOi0Ml4s*zzDJqCS?wp`ZKjP-E=UW0H3# zMq^o53}l8xL)n42f4WtH@@?SbjK_DIv*dQTb70Qt@VQJXK`8($D8(so)7%`8=S?7{oKJ!)&liYi*;mlCp#t%WJO2%r0{LAd zM}1;D | ,', - ,true,false,'INPUT','NEW',1 ,'f','now()', 1, 'now()', 1); + | ,', true,false,'INPUT','NEW',1 ,'f','now()', 1, 'now()', 1); INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Copy container image' and ps."index"=1 and ps.deleted=false), 'DOCKER_IMAGE','STRING','',false,true,'INPUT','GLOBAL',1 ,'DOCKER_IMAGE','f','now()', 1, 'now()', 1); From 98631ef273383066eb516161872cf09036e76d28 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 20 Nov 2023 17:26:35 +0530 Subject: [PATCH 139/143] enterprise bugs --- .../CiArtifactsListingQueryBuilder.go | 10 ++--- pkg/pipeline/AppArtifactManager.go | 41 +++++++++++++++---- pkg/pipeline/WorkflowDagExecutor.go | 12 +++--- 3 files changed, 42 insertions(+), 21 deletions(-) diff --git a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go index 9c607d70e8..9561b5c497 100644 --- a/internal/sql/repository/CiArtifactsListingQueryBuilder.go +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -54,12 +54,7 @@ func 
BuildQueryForArtifactsForCdStage(listingFilterOptions bean.ArtifactsListFil " AND ((cd_workflow.pipeline_id= %v and cd_workflow_runner.workflow_type = '%v' ) OR (cd_workflow.pipeline_id = %v AND cd_workflow_runner.workflow_type = '%v' AND cd_workflow_runner.status IN ('Healthy','Succeeded') )))" + " OR (ci_artifact.component_id = %v and ci_artifact.data_source= '%v' ))" + " AND (ci_artifact.image LIKE '%v' )" - //commonQuery := " FROM cd_workflow_runner " + - // " INNER JOIN cd_workflow ON cd_workflow.id=cd_workflow_runner.cd_workflow_id " + - // " INNER JOIN ci_artifact cia ON cia.id = cd_workflow.ci_artifact_id " + - // " WHERE (cd_workflow.pipeline_id = %v AND cd_workflow_runner.workflow_type = '%v') " + - // " OR (cd_workflow.pipeline_id = %v AND cd_workflow_runner.workflow_type = '%v' AND cd_workflow_runner.status IN ('Healthy','Succeeded'))" + - // " AND cia.image LIKE '%v' " + commonQuery = fmt.Sprintf(commonQuery, listingFilterOptions.PipelineId, listingFilterOptions.StageType, listingFilterOptions.ParentId, listingFilterOptions.ParentStageType, listingFilterOptions.ParentId, listingFilterOptions.PluginStage, listingFilterOptions.SearchString) if len(listingFilterOptions.ExcludeArtifactIds) > 0 { commonQuery = commonQuery + fmt.Sprintf(" AND ( ci_artifact.id NOT IN (%v))", helper.GetCommaSepratedString(listingFilterOptions.ExcludeArtifactIds)) @@ -80,10 +75,11 @@ func BuildQueryForArtifactsForRollback(listingFilterOptions bean.ArtifactsListFi " INNER JOIN cd_workflow cdw ON cdw.id=cdwr.cd_workflow_id " + " INNER JOIN ci_artifact cia ON cia.id=cdw.ci_artifact_id " + " WHERE cdw.pipeline_id=%v AND cdwr.workflow_type = '%v' " + + commonQuery = fmt.Sprintf(commonQuery, listingFilterOptions.PipelineId, listingFilterOptions.StageType) if listingFilterOptions.SearchString != EmptyLikeRegex { commonQuery += fmt.Sprintf(" AND cia.image LIKE '%v' ", listingFilterOptions.SearchString) } - commonQuery = fmt.Sprintf(commonQuery, listingFilterOptions.PipelineId, 
listingFilterOptions.StageType) if len(listingFilterOptions.ExcludeWfrIds) > 0 { commonQuery = fmt.Sprintf(" %s AND cdwr.id NOT IN (%s)", commonQuery, helper.GetCommaSepratedString(listingFilterOptions.ExcludeWfrIds)) } diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 5d26cab826..88eb0842a7 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -313,6 +313,27 @@ func (impl *AppArtifactManagerImpl) FetchArtifactForRollbackV2(cdPipelineId, app if imageCommentResp := imageCommentsDataMap[deployedCiArtifacts[i].Id]; imageCommentResp != nil { deployedCiArtifacts[i].ImageComment = imageCommentResp } + var dockerRegistryId string + if deployedCiArtifacts[i].DataSource == repository.POST_CI || deployedCiArtifacts[i].DataSource == repository.PRE_CD || deployedCiArtifacts[i].DataSource == repository.POST_CD { + if deployedCiArtifacts[i].CredentialsSourceType == repository.GLOBAL_CONTAINER_REGISTRY { + dockerRegistryId = deployedCiArtifacts[i].CredentialsSourceValue + } + } else { + ciPipeline, err := impl.CiPipelineRepository.FindById(deployedCiArtifacts[i].CiPipelineId) + if err != nil { + impl.logger.Errorw("error in fetching ciPipeline", "ciPipelineId", ciPipeline.Id, "error", err) + return deployedCiArtifactsResponse, err + } + dockerRegistryId = *ciPipeline.CiTemplate.DockerRegistryId + } + if len(dockerRegistryId) > 0 { + dockerArtifact, err := impl.dockerArtifactRegistry.FindOne(dockerRegistryId) + if err != nil { + impl.logger.Errorw("error in getting docker registry details", "err", err, "dockerArtifactStoreId", dockerRegistryId) + } + deployedCiArtifacts[i].RegistryType = string(dockerArtifact.RegistryType) + deployedCiArtifacts[i].RegistryName = dockerRegistryId + } } deployedCiArtifactsResponse.CdPipelineId = cdPipelineId @@ -370,14 +391,18 @@ func (impl *AppArtifactManagerImpl) BuildRollbackArtifactsList(artifactListingFi } userEmail := userEmails[ciArtifact.TriggeredBy] 
deployedCiArtifacts = append(deployedCiArtifacts, bean2.CiArtifactBean{ - Id: ciArtifact.Id, - Image: ciArtifact.Image, - MaterialInfo: mInfo, - DeployedTime: formatDate(ciArtifact.StartedOn, bean2.LayoutRFC3339), - WfrId: ciArtifact.CdWorkflowRunnerId, - DeployedBy: userEmail, - Scanned: ciArtifact.Scanned, - ScanEnabled: ciArtifact.ScanEnabled, + Id: ciArtifact.Id, + Image: ciArtifact.Image, + MaterialInfo: mInfo, + DeployedTime: formatDate(ciArtifact.StartedOn, bean2.LayoutRFC3339), + WfrId: ciArtifact.CdWorkflowRunnerId, + DeployedBy: userEmail, + Scanned: ciArtifact.Scanned, + ScanEnabled: ciArtifact.ScanEnabled, + CiPipelineId: ciArtifact.PipelineId, + CredentialsSourceType: ciArtifact.CredentialsSourceType, + CredentialsSourceValue: ciArtifact.CredentialSourceValue, + DataSource: ciArtifact.DataSource, }) artifactIds = append(artifactIds, ciArtifact.Id) } diff --git a/pkg/pipeline/WorkflowDagExecutor.go b/pkg/pipeline/WorkflowDagExecutor.go index 9f849201d0..cb5bae209a 100644 --- a/pkg/pipeline/WorkflowDagExecutor.go +++ b/pkg/pipeline/WorkflowDagExecutor.go @@ -578,7 +578,7 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(cdStageCompleteE if err != nil { return err } - PreCDArtifacts, err := impl.SavePluginArtifacts(ciArtifact, cdStageCompleteEvent.PluginRegistryArtifactDetails, pipeline.Id, repository.PRE_CD) + PreCDArtifacts, err := impl.SavePluginArtifacts(ciArtifact, cdStageCompleteEvent.PluginRegistryArtifactDetails, pipeline.Id, repository.PRE_CD, cdStageCompleteEvent.TriggeredBy) if err != nil { impl.logger.Errorw("error in saving plugin artifacts", "err", err) return err @@ -605,7 +605,7 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(cdStageCompleteE return nil } -func (impl *WorkflowDagExecutorImpl) SavePluginArtifacts(ciArtifact *repository.CiArtifact, pluginArtifactsDetail map[string][]string, pipelineId int, stage string) ([]*repository.CiArtifact, error) { +func (impl *WorkflowDagExecutorImpl) 
SavePluginArtifacts(ciArtifact *repository.CiArtifact, pluginArtifactsDetail map[string][]string, pipelineId int, stage string, triggerdBy int32) ([]*repository.CiArtifact, error) { saveArtifacts, err := impl.ciArtifactRepository.GetArtifactsByDataSourceAndComponentId(stage, pipelineId) if err != nil { @@ -639,9 +639,9 @@ func (impl *WorkflowDagExecutorImpl) SavePluginArtifacts(ciArtifact *repository. CredentialSourceValue: registry, AuditLog: sql.AuditLog{ CreatedOn: time.Now(), - CreatedBy: DEVTRON_SYSTEM_USER_ID, + CreatedBy: triggerdBy, UpdatedOn: time.Now(), - UpdatedBy: DEVTRON_SYSTEM_USER_ID, + UpdatedBy: triggerdBy, }, ParentCiArtifact: parentCiArtifactId, } @@ -661,7 +661,7 @@ func (impl *WorkflowDagExecutorImpl) TriggerPreStage(ctx context.Context, cdWf * triggeredAt := time.Now() //in case of pre stage manual trigger auth is already applied - if applyAuth { + if applyAuth && triggeredBy != 1 { user, err := impl.user.GetById(artifact.UpdatedBy) if err != nil { impl.logger.Errorw("error in fetching user for auto pipeline", "UpdatedBy", artifact.UpdatedBy) @@ -1596,7 +1596,7 @@ func (impl *WorkflowDagExecutorImpl) HandlePostStageSuccessEvent(cdWorkflowId in return err } if len(pluginRegistryImageDetails) > 0 { - PostCDArtifacts, err := impl.SavePluginArtifacts(ciArtifact, pluginRegistryImageDetails, cdPipelineId, repository.POST_CD) + PostCDArtifacts, err := impl.SavePluginArtifacts(ciArtifact, pluginRegistryImageDetails, cdPipelineId, repository.POST_CD, triggeredBy) if err != nil { impl.logger.Errorw("error in saving plugin artifacts", "err", err) return err From b9eda3ceddbb2f2d33d5af33a8d9eb29837cdfdd Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 20 Nov 2023 17:48:43 +0530 Subject: [PATCH 140/143] PE REVIEW CHANFS --- .../sql/repository/CiArtifactRepository.go | 9 +++++---- .../CiArtifactsListingQueryBuilder.go | 2 ++ pkg/pipeline/PipelineBuilder.go | 2 +- pkg/pipeline/pipelineStageVariableParser.go | 5 +++-- 
.../sql/189_copy_container_images.down.sql | 9 +++++++++ scripts/sql/189_copy_container_images.up.sql | 20 +++++++++++++++++++ 6 files changed, 40 insertions(+), 7 deletions(-) diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index fe535234f7..5a1318757f 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -38,12 +38,13 @@ const ( GLOBAL_CONTAINER_REGISTRY credentialsSource = "global_container_registry" ) const ( - CI_RUNNER artifactsSourceType = "ci_runner" - WEBHOOK artifactsSourceType = "ext" + CI_RUNNER artifactsSourceType = "CI-RUNNER" + WEBHOOK artifactsSourceType = "EXTERNAL" PRE_CD artifactsSourceType = "pre_cd" POST_CD artifactsSourceType = "post_cd" PRE_CI artifactsSourceType = "pre_ci" POST_CI artifactsSourceType = "post_ci" + GOCD artifactsSourceType = "GOCD" ) type CiArtifactWithExtraData struct { @@ -61,8 +62,8 @@ type CiArtifact struct { PipelineId int `sql:"pipeline_id"` //id of the ci pipeline from which this webhook was triggered Image string `sql:"image,notnull"` ImageDigest string `sql:"image_digest,notnull"` - MaterialInfo string `sql:"material_info"` //git material metadata json array string - DataSource string `sql:"data_source,notnull"` + MaterialInfo string `sql:"material_info"` //git material metadata json array string + DataSource string `sql:"data_source,notnull"` // possible values -> (CI_RUNNER,ext,post_ci,pre_cd,post_cd) CI_runner is for normal build ci WorkflowId *int `sql:"ci_workflow_id"` ParentCiArtifact int `sql:"parent_ci_artifact"` ScanEnabled bool `sql:"scan_enabled,notnull"` diff --git a/internal/sql/repository/CiArtifactsListingQueryBuilder.go b/internal/sql/repository/CiArtifactsListingQueryBuilder.go index 9561b5c497..a7ff12332c 100644 --- a/internal/sql/repository/CiArtifactsListingQueryBuilder.go +++ b/internal/sql/repository/CiArtifactsListingQueryBuilder.go @@ -47,6 +47,8 @@ func 
BuildQueryForParentTypeCIOrWebhook(listingFilterOpts bean.ArtifactsListFilt } func BuildQueryForArtifactsForCdStage(listingFilterOptions bean.ArtifactsListFilterOptions) string { + // expected result -> will fetch all successfully deployed artifacts ar parent stage plus its own stage. Along with this it will + // also fetch all artifacts generated by plugin at pre_cd or post_cd process (will use data_source in ci artifact table for this) commonQuery := " from ci_artifact LEFT JOIN cd_workflow ON ci_artifact.id = cd_workflow.ci_artifact_id" + " LEFT JOIN cd_workflow_runner ON cd_workflow_runner.cd_workflow_id=cd_workflow.id " + diff --git a/pkg/pipeline/PipelineBuilder.go b/pkg/pipeline/PipelineBuilder.go index 38de5b8516..1b41718880 100644 --- a/pkg/pipeline/PipelineBuilder.go +++ b/pkg/pipeline/PipelineBuilder.go @@ -250,7 +250,7 @@ type ConfigMapSecretsResponse struct { } func parseMaterialInfo(materialInfo json.RawMessage, source string) (json.RawMessage, error) { - if source != "GOCD" && source != "CI-RUNNER" && source != "EXTERNAL" && source != "pre_cd" && source != "post_cd" && source != "post_ci" { + if source != repository.GOCD && source != repository.CI_RUNNER && source != repository.WEBHOOK && source != repository.PRE_CD && source != repository.POST_CD && source != repository.POST_CI { return nil, fmt.Errorf("datasource: %s not supported", source) } var ciMaterials []repository.CiMaterialInfo diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index 68995b3b6a..b938e384ce 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -18,6 +18,7 @@ type RefPluginName = string const ( COPY_CONTAINER_IMAGE RefPluginName = "Copy container image" + EMPTY_STRING = " " ) const ( @@ -100,11 +101,11 @@ func (impl *PluginInputVariableParserImpl) getRegistryRepoMapping(destinationInf destinationRegistryRepoDetails := strings.Split(destinationInfo, "\n") for _, detail 
:= range destinationRegistryRepoDetails { registryRepoSplit := strings.Split(detail, "|") - registryName := strings.Trim(registryRepoSplit[0], " ") + registryName := strings.Trim(registryRepoSplit[0], EMPTY_STRING) repositoryValuesSplit := strings.Split(registryRepoSplit[1], ",") var repositories []string for _, repositoryName := range repositoryValuesSplit { - repositoryName = strings.Trim(repositoryName, " ") + repositoryName = strings.Trim(repositoryName, EMPTY_STRING) repositories = append(repositories, repositoryName) } destinationRegistryRepositoryMap[registryName] = repositories diff --git a/scripts/sql/189_copy_container_images.down.sql b/scripts/sql/189_copy_container_images.down.sql index 140cdf9cf3..afaaf2bc6a 100644 --- a/scripts/sql/189_copy_container_images.down.sql +++ b/scripts/sql/189_copy_container_images.down.sql @@ -4,3 +4,12 @@ DELETE FROM plugin_stage_mapping WHERE plugin_id =(SELECT id FROM plugin_metadat DELETE FROM pipeline_stage_step_variable WHERE pipeline_stage_step_id in (SELECT id FROM pipeline_stage_step where ref_plugin_id =(SELECT id from plugin_metadata WHERE name ='Copy container image')); DELETE FROM pipeline_stage_step where ref_plugin_id in (SELECT id from plugin_metadata WHERE name ='Copy container image'); DELETE FROM plugin_metadata WHERE name ='Copy container image'; + + +ALTER TABLE custom_tag DROP COLUMN enabled; +ALTER TABLE ci_artifact DROP COLUMN credentials_source_type ; +ALTER TABLE ci_artifact DROP COLUMN credentials_source_value ; +ALTER TABLE ci_artifact DROP COLUMN component_id; +ALTER TABLE ci_workflow DROP COLUMN image_path_reservation_ids; +ALTER TABLE cd_workflow_runner DROP COLUMN image_path_reservation_ids; +ALTER TABLE image_path_reservation DROP CONSTRAINT image_path_reservation_custom_tag_id_fkey; \ No newline at end of file diff --git a/scripts/sql/189_copy_container_images.up.sql b/scripts/sql/189_copy_container_images.up.sql index 7269759c31..d3761d8159 100644 --- 
a/scripts/sql/189_copy_container_images.up.sql +++ b/scripts/sql/189_copy_container_images.up.sql @@ -1,3 +1,6 @@ + +-- copy container images plugin migration script start + INSERT INTO "plugin_metadata" ("id", "name", "description","type","icon","deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_metadata'), 'Copy container image','Copy container images from the source repository to a desired repository','PRESET','https://raw.githubusercontent.com/devtron-labs/devtron/main/assets/ic-plugin-copy-container-image.png','f', 'now()', 1, 'now()', 1); @@ -27,3 +30,20 @@ VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metada INSERT INTO "plugin_step_variable" ("id", "plugin_step_id", "name", "format", "description", "is_exposed", "allow_empty_value","variable_type", "value_type", "variable_step_index",reference_variable_name, "deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step_variable'), (SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Copy container image' and ps."index"=1 and ps.deleted=false), 'REGISTRY_CREDENTIALS','STRING','',false,true,'INPUT','GLOBAL',1 ,'REGISTRY_CREDENTIALS','f','now()', 1, 'now()', 1); + +-- copy container images plugin migration script ends + +-- requiered db changes for above scipt + +ALTER TABLE custom_tag ADD COLUMN enabled boolean default false; +ALTER TABLE ci_artifact ADD COLUMN credentials_source_type VARCHAR(50); +ALTER TABLE ci_artifact ADD COLUMN credentials_source_value VARCHAR(50); +ALTER TABLE ci_artifact ADD COLUMN component_id integer; + +ALTER TABLE ci_workflow ADD COLUMN image_path_reservation_ids integer[]; + +UPDATE ci_workflow set image_path_reservation_ids=ARRAY["image_path_reservation_id"] where image_path_reservation_id is not NULL; + +ALTER TABLE cd_workflow_runner ADD COLUMN image_path_reservation_ids integer[]; + +ALTER TABLE image_path_reservation 
DROP CONSTRAINT image_path_reservation_custom_tag_id_fkey; \ No newline at end of file From 0d846362b10c6a2958cfe25116979d844a0b4aa5 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Mon, 20 Nov 2023 19:51:53 +0530 Subject: [PATCH 141/143] updating plugin image in migration --- scripts/sql/189_copy_container_images.up.sql | 2 +- scripts/sql/190_custom_tag.down.sql | 7 ------- scripts/sql/190_custom_tag.up.sql | 12 ------------ 3 files changed, 1 insertion(+), 20 deletions(-) delete mode 100644 scripts/sql/190_custom_tag.down.sql delete mode 100644 scripts/sql/190_custom_tag.up.sql diff --git a/scripts/sql/189_copy_container_images.up.sql b/scripts/sql/189_copy_container_images.up.sql index d3761d8159..7c3e5324c8 100644 --- a/scripts/sql/189_copy_container_images.up.sql +++ b/scripts/sql/189_copy_container_images.up.sql @@ -11,7 +11,7 @@ INSERT INTO "plugin_stage_mapping" ("plugin_id","stage_type","created_on", "crea VALUES ((SELECT id FROM plugin_metadata WHERE name='Copy container image'),0,'now()', 1, 'now()', 1); INSERT INTO "plugin_pipeline_script" ("id","type","mount_directory_from_host","container_image_path","deleted","created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_pipeline_script'),'CONTAINER_IMAGE','t','quay.io/devtron/test:8d5a6d8d-81-1031','f','now()',1,'now()',1); +VALUES (nextval('id_seq_plugin_pipeline_script'),'CONTAINER_IMAGE','t','quay.io/devtron/copy-container-images:d3b16093-567-19517','f','now()',1,'now()',1); INSERT INTO "plugin_step" ("id", "plugin_id","name","description","index","step_type","script_id","deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM plugin_metadata WHERE name='Copy container image'),'Step 1','Step 1 - Copy container images','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1); diff --git a/scripts/sql/190_custom_tag.down.sql b/scripts/sql/190_custom_tag.down.sql 
deleted file mode 100644 index f7a9862c5b..0000000000 --- a/scripts/sql/190_custom_tag.down.sql +++ /dev/null @@ -1,7 +0,0 @@ -ALTER TABLE custom_tag DROP COLUMN enabled; -ALTER TABLE ci_artifact DROP COLUMN credentials_source_type ; -ALTER TABLE ci_artifact DROP COLUMN credentials_source_value ; -ALTER TABLE ci_artifact DROP COLUMN component_id; -ALTER TABLE ci_workflow DROP COLUMN image_path_reservation_ids; -ALTER TABLE cd_workflow_runner DROP COLUMN image_path_reservation_ids; -ALTER TABLE image_path_reservation DROP CONSTRAINT image_path_reservation_custom_tag_id_fkey; \ No newline at end of file diff --git a/scripts/sql/190_custom_tag.up.sql b/scripts/sql/190_custom_tag.up.sql deleted file mode 100644 index 0f785fc91a..0000000000 --- a/scripts/sql/190_custom_tag.up.sql +++ /dev/null @@ -1,12 +0,0 @@ -ALTER TABLE custom_tag ADD COLUMN enabled boolean default false; -ALTER TABLE ci_artifact ADD COLUMN credentials_source_type VARCHAR(50); -ALTER TABLE ci_artifact ADD COLUMN credentials_source_value VARCHAR(50); -ALTER TABLE ci_artifact ADD COLUMN component_id integer; - -ALTER TABLE ci_workflow ADD COLUMN image_path_reservation_ids integer[]; - -UPDATE ci_workflow set image_path_reservation_ids=ARRAY["image_path_reservation_id"] where image_path_reservation_id is not NULL; - -ALTER TABLE cd_workflow_runner ADD COLUMN image_path_reservation_ids integer[]; - -ALTER TABLE image_path_reservation DROP CONSTRAINT image_path_reservation_custom_tag_id_fkey; \ No newline at end of file From 335f67ec022d4cae99e0c4770fc311937ff17605 Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Tue, 21 Nov 2023 12:38:36 +0530 Subject: [PATCH 142/143] sql script correction --- ...ntainer_images.down.sql => 190_copy_container_images.down.sql} | 0 ...y_container_images.up.sql => 190_copy_container_images.up.sql} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename scripts/sql/{189_copy_container_images.down.sql => 190_copy_container_images.down.sql} (100%) rename 
scripts/sql/{189_copy_container_images.up.sql => 190_copy_container_images.up.sql} (100%) diff --git a/scripts/sql/189_copy_container_images.down.sql b/scripts/sql/190_copy_container_images.down.sql similarity index 100% rename from scripts/sql/189_copy_container_images.down.sql rename to scripts/sql/190_copy_container_images.down.sql diff --git a/scripts/sql/189_copy_container_images.up.sql b/scripts/sql/190_copy_container_images.up.sql similarity index 100% rename from scripts/sql/189_copy_container_images.up.sql rename to scripts/sql/190_copy_container_images.up.sql From 3871da46faf4b07f79e46c0af825d48aa0a2c71e Mon Sep 17 00:00:00 2001 From: ayushmaheshwari Date: Tue, 21 Nov 2023 16:01:17 +0530 Subject: [PATCH 143/143] sql script update --- scripts/sql/190_copy_container_images.up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sql/190_copy_container_images.up.sql b/scripts/sql/190_copy_container_images.up.sql index 7c3e5324c8..c41f18e536 100644 --- a/scripts/sql/190_copy_container_images.up.sql +++ b/scripts/sql/190_copy_container_images.up.sql @@ -11,7 +11,7 @@ INSERT INTO "plugin_stage_mapping" ("plugin_id","stage_type","created_on", "crea VALUES ((SELECT id FROM plugin_metadata WHERE name='Copy container image'),0,'now()', 1, 'now()', 1); INSERT INTO "plugin_pipeline_script" ("id","type","mount_directory_from_host","container_image_path","deleted","created_on", "created_by", "updated_on", "updated_by") -VALUES (nextval('id_seq_plugin_pipeline_script'),'CONTAINER_IMAGE','t','quay.io/devtron/copy-container-images:d3b16093-567-19517','f','now()',1,'now()',1); +VALUES (nextval('id_seq_plugin_pipeline_script'),'CONTAINER_IMAGE','t','quay.io/devtron/copy-container-images:7285439d-567-19519','f','now()',1,'now()',1); INSERT INTO "plugin_step" ("id", "plugin_id","name","description","index","step_type","script_id","deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM 
plugin_metadata WHERE name='Copy container image'),'Step 1','Step 1 - Copy container images','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1);