diff --git a/Wire.go b/Wire.go
index 2e43644117..e5c056f9cf 100644
--- a/Wire.go
+++ b/Wire.go
@@ -102,7 +102,6 @@ import (
 	appWorkflow2 "github.com/devtron-labs/devtron/internal/sql/repository/appWorkflow"
 	"github.com/devtron-labs/devtron/internal/sql/repository/bulkUpdate"
 	"github.com/devtron-labs/devtron/internal/sql/repository/chartConfig"
-	"github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig"
 	dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry"
 	"github.com/devtron-labs/devtron/internal/sql/repository/helper"
 	repository8 "github.com/devtron-labs/devtron/internal/sql/repository/imageTagging"
@@ -110,6 +109,7 @@ import (
 	resourceGroup "github.com/devtron-labs/devtron/internal/sql/repository/resourceGroup"
 	"github.com/devtron-labs/devtron/internal/util"
 	"github.com/devtron-labs/devtron/pkg/app"
+	read4 "github.com/devtron-labs/devtron/pkg/app/appDetails/read"
 	"github.com/devtron-labs/devtron/pkg/app/dbMigration"
 	"github.com/devtron-labs/devtron/pkg/app/status"
 	"github.com/devtron-labs/devtron/pkg/appClone"
@@ -128,6 +128,7 @@ import (
 	"github.com/devtron-labs/devtron/pkg/bulkAction/service"
 	"github.com/devtron-labs/devtron/pkg/chart"
 	"github.com/devtron-labs/devtron/pkg/chart/gitOpsConfig"
+	read2 "github.com/devtron-labs/devtron/pkg/chart/read"
 	chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository"
 	"github.com/devtron-labs/devtron/pkg/commonService"
 	"github.com/devtron-labs/devtron/pkg/config"
@@ -357,6 +358,8 @@ func InitializeApp() (*App, error) {
 		wire.Bind(new(gitOpsConfig.DevtronAppGitOpConfigService), new(*gitOpsConfig.DevtronAppGitOpConfigServiceImpl)),
 		chart.NewChartServiceImpl,
 		wire.Bind(new(chart.ChartService), new(*chart.ChartServiceImpl)),
+		read2.NewChartReadServiceImpl,
+		wire.Bind(new(read2.ChartReadService), new(*read2.ChartReadServiceImpl)),
 		service.NewBulkUpdateServiceImpl,
 		wire.Bind(new(service.BulkUpdateService), new(*service.BulkUpdateServiceImpl)),
@@ -375,6 +378,10 @@ func InitializeApp() (*App, error) {
 		wire.Bind(new(appList.AppListingRouter), new(*appList.AppListingRouterImpl)),
 		appList2.NewAppListingRestHandlerImpl,
 		wire.Bind(new(appList2.AppListingRestHandler), new(*appList2.AppListingRestHandlerImpl)),
+
+		read4.NewAppDetailsReadServiceImpl,
+		wire.Bind(new(read4.AppDetailsReadService), new(*read4.AppDetailsReadServiceImpl)),
+
 		app.NewAppListingServiceImpl,
 		wire.Bind(new(app.AppListingService), new(*app.AppListingServiceImpl)),
 		repository.NewAppListingRepositoryImpl,
@@ -622,6 +629,7 @@ func InitializeApp() (*App, error) {
 		repository9.NewClusterInstalledAppsRepositoryImpl,
 		wire.Bind(new(repository9.ClusterInstalledAppsRepository), new(*repository9.ClusterInstalledAppsRepositoryImpl)),
+		commonService.NewCommonBaseServiceImpl,
 		commonService.NewCommonServiceImpl,
 		wire.Bind(new(commonService.CommonService), new(*commonService.CommonServiceImpl)),
@@ -659,8 +667,8 @@ func InitializeApp() (*App, error) {
 		router.NewCommonRouterImpl,
 		wire.Bind(new(router.CommonRouter), new(*router.CommonRouterImpl)),
-		restHandler.NewCommonRestHanlderImpl,
-		wire.Bind(new(restHandler.CommonRestHanlder), new(*restHandler.CommonRestHanlderImpl)),
+		restHandler.NewCommonRestHandlerImpl,
+		wire.Bind(new(restHandler.CommonRestHandler), new(*restHandler.CommonRestHandlerImpl)),
 		router.NewScopedVariableRouterImpl,
 		wire.Bind(new(router.ScopedVariableRouter), new(*router.ScopedVariableRouterImpl)),
@@ -941,11 +949,7 @@ func InitializeApp() (*App, error) {
 		cel.NewCELServiceImpl,
 		wire.Bind(new(cel.EvaluatorService), new(*cel.EvaluatorServiceImpl)),
-		deploymentConfig.NewRepositoryImpl,
-		wire.Bind(new(deploymentConfig.Repository), new(*deploymentConfig.RepositoryImpl)),
-
-		common.NewDeploymentConfigServiceImpl,
-		wire.Bind(new(common.DeploymentConfigService), new(*common.DeploymentConfigServiceImpl)),
+		common.WireSet,
 		repoCredsK8sClient.NewRepositoryCredsK8sClientImpl,
 		wire.Bind(new(repoCredsK8sClient.RepositoryCredsK8sClient), new(*repoCredsK8sClient.RepositoryCredsK8sClientImpl)),
diff --git a/WiringNilCheck.go b/WiringNilCheck.go
index 946b154d92..2e5d82a8b9 100755
--- a/WiringNilCheck.go
+++ b/WiringNilCheck.go
@@ -114,6 +114,8 @@ func skipUnnecessaryFieldsForCheck(fieldName, valName string) bool {
 		"modulecronserviceimpl":        {"cron"},
 		"oteltracingserviceimpl":       {"traceprovider"},
 		"terminalaccessrepositoryimpl": {"templatescache"},
+		"grpcapiclientimpl":            {"serviceclient"},
+		"serverenvconfig":              {"errorencounteredongettingdevtronhelmrelease"},
 	}
 	if _, ok := fieldAndValName[valName]; ok {
 		for _, ignoreFieldName := range fieldAndValName[valName] {
diff --git a/api/argoApplication/ArgoApplicationRestHandler.go b/api/argoApplication/ArgoApplicationRestHandler.go
index a48812d329..edcbabdca9 100644
--- a/api/argoApplication/ArgoApplicationRestHandler.go
+++ b/api/argoApplication/ArgoApplicationRestHandler.go
@@ -17,6 +17,7 @@ package argoApplication
 
 import (
+	"context"
 	"errors"
 	"github.com/devtron-labs/devtron/api/restHandler/common"
 	"github.com/devtron-labs/devtron/pkg/argoApplication"
@@ -89,6 +90,9 @@ func (handler *ArgoApplicationRestHandlerImpl) GetApplicationDetail(w http.Respo
 		common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden)
 		return
 	}
+	ctx := r.Context()
+	ctx = context.WithValue(ctx, "token", token)
+	var err error
 	v := r.URL.Query()
 	resourceName := v.Get("name")
@@ -104,7 +108,7 @@ func (handler *ArgoApplicationRestHandlerImpl) GetApplicationDetail(w http.Respo
 			return
 		}
 	}
-	resp, err := handler.readService.GetAppDetail(resourceName, namespace, clusterId)
+	resp, err := handler.readService.GetAppDetailEA(ctx, resourceName, namespace, clusterId)
 	if err != nil {
 		handler.logger.Errorw("error in getting argo application app detail", "err", err, "resourceName", resourceName, "clusterId", clusterId)
 		common.WriteJsonResp(w, err, nil, http.StatusInternalServerError)
diff --git a/api/argoApplication/wire_argoApplication.go b/api/argoApplication/wire_argoApplication.go
index fa5ea66abf..e3c7381725 100644
--- a/api/argoApplication/wire_argoApplication.go
+++ b/api/argoApplication/wire_argoApplication.go
@@ -30,6 +30,7 @@ var ArgoApplicationWireSetFull = wire.NewSet(
 	config.NewArgoApplicationConfigServiceImpl,
 	wire.Bind(new(config.ArgoApplicationConfigService), new(*config.ArgoApplicationConfigServiceImpl)),
+	argoApplication.NewArgoApplicationServiceImpl,
 	argoApplication.NewArgoApplicationServiceExtendedServiceImpl,
 	wire.Bind(new(argoApplication.ArgoApplicationService), new(*argoApplication.ArgoApplicationServiceExtendedImpl)),
diff --git a/api/bean/AppView/AppView.go b/api/bean/AppView/AppView.go
index d82191cba0..8779691a25 100644
--- a/api/bean/AppView/AppView.go
+++ b/api/bean/AppView/AppView.go
@@ -294,3 +294,16 @@ type LinkOuts struct {
 	Link        string `json:"link,omitempty"`
 	Description string `json:"description,omitempty"`
 }
+
+type AppStages struct {
+	AppId                   int    `json:"app_id,omitempty" sql:"app_id"`
+	CiTemplateId            int    `json:"ci_template_id,omitempty" sql:"ci_template_id"`
+	CiPipelineId            int    `json:"ci_pipeline_id,omitempty" sql:"ci_pipeline_id"`
+	ChartId                 int    `json:"chart_id,omitempty" sql:"chart_id"`
+	ChartGitRepoUrl         string `json:"chart_git_repo_url,omitempty" sql:"chart_git_repo_url"`
+	PipelineId              int    `json:"pipeline_id,omitempty" sql:"pipeline_id"`
+	YamlStatus              int    `json:"yaml_status,omitempty" sql:"yaml_status"`
+	YamlReviewed            bool   `json:"yaml_reviewed,omitempty" sql:"yaml_reviewed"`
+	DeploymentConfigRepoURL string `json:"deployment_config_repo_url" sql:"-"`
+	GitMaterialExists       int    `json:"-" sql:"-"`
+}
diff --git a/api/bean/gitOps/GitOpsConfig.go b/api/bean/gitOps/GitOpsConfig.go
index 2cc7e494db..ba57fe8dac 100644
--- a/api/bean/gitOps/GitOpsConfig.go
+++ b/api/bean/gitOps/GitOpsConfig.go
@@ -42,10 +42,15 @@ type GitOpsConfigDto struct {
 	IsTLSKeyDataPresent bool `json:"isTLSKeyDataPresent"`
 
 	// TODO refactoring: create different struct for internal fields
-	GitRepoName string `json:"-"`
-	UserEmailId string `json:"-"`
-	Description string `json:"-"`
-	UserId      int32  `json:"-"`
+	GitRepoName    string `json:"-"`
+	TargetRevision string `json:"-"`
+	UserEmailId    string `json:"-"`
+	Description    string `json:"-"`
+	UserId         int32  `json:"-"`
+}
+
+func (dto GitOpsConfigDto) GetHostUrl() string {
+	return dto.Host
 }
 
 type GitRepoRequestDto struct {
@@ -80,5 +85,5 @@ func IsGitOpsRepoNotConfigured(gitRepoUrl string) bool {
 }
 
 func IsGitOpsRepoConfigured(gitRepoUrl string) bool {
-	return len(gitRepoUrl) != 0 || gitRepoUrl != GIT_REPO_NOT_CONFIGURED
+	return !IsGitOpsRepoNotConfigured(gitRepoUrl)
 }
diff --git a/api/helm-app/HelmAppRestHandler.go b/api/helm-app/HelmAppRestHandler.go
index eab4e7d218..b5576139f2 100644
--- a/api/helm-app/HelmAppRestHandler.go
+++ b/api/helm-app/HelmAppRestHandler.go
@@ -26,6 +26,7 @@ import (
 	"github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/EAMode"
 	"github.com/devtron-labs/devtron/pkg/argoApplication"
 	"github.com/devtron-labs/devtron/pkg/argoApplication/helper"
+	clusterBean "github.com/devtron-labs/devtron/pkg/cluster/bean"
 	clientErrors "github.com/devtron-labs/devtron/pkg/errors"
 	"github.com/devtron-labs/devtron/pkg/fluxApplication"
 	bean2 "github.com/devtron-labs/devtron/pkg/k8s/application/bean"
@@ -467,7 +468,7 @@ func (handler *HelmAppRestHandlerImpl) DeleteApplication(w http.ResponseWriter,
 	// block deletion of the devtron-operator helm release
 	if appIdentifier.ReleaseName == handler.serverEnvConfig.DevtronHelmReleaseName &&
 		appIdentifier.Namespace == handler.serverEnvConfig.DevtronHelmReleaseNamespace &&
-		appIdentifier.ClusterId == bean.DEFAULT_CLUSTER_ID {
+		appIdentifier.ClusterId == clusterBean.DefaultClusterId {
 		common.WriteJsonResp(w, errors.New("cannot delete this default helm app"), nil, http.StatusForbidden)
 		return
 	}
diff --git a/api/helm-app/bean/bean.go b/api/helm-app/bean/bean.go
index d5280126d3..16f4a804d9 100644
--- a/api/helm-app/bean/bean.go
+++ b/api/helm-app/bean/bean.go
@@ -23,7 +23,6 @@ import (
 )
 
 const (
-	DEFAULT_CLUSTER_ID       = 1
 	SOURCE_DEVTRON_APP       SourceAppType = "devtron-app"
 	SOURCE_HELM_APP          SourceAppType = "helm-app"
 	SOURCE_EXTERNAL_HELM_APP SourceAppType = "external-helm-app"
diff --git a/api/k8s/application/k8sApplicationRestHandler.go b/api/k8s/application/k8sApplicationRestHandler.go
index c445266842..dd68e18e15 100644
--- a/api/k8s/application/k8sApplicationRestHandler.go
+++ b/api/k8s/application/k8sApplicationRestHandler.go
@@ -253,6 +253,8 @@ func (handler *K8sApplicationRestHandlerImpl) GetHostUrlsByBatch(w http.Response
 	}
 	token := r.Header.Get("token")
+	ctx := r.Context()
+	ctx = context.WithValue(ctx, "token", token)
 	var k8sAppDetail AppView.AppDetailContainer
 	var resourceTreeResponse *gRPC.ResourceTreeResponse
 	var clusterId int
@@ -276,7 +278,7 @@ func (handler *K8sApplicationRestHandlerImpl) GetHostUrlsByBatch(w http.Response
 		return
 	}
 	//RBAC enforcer Ends
-	appDetail, err := handler.helmAppService.GetApplicationDetail(r.Context(), appIdentifier)
+	appDetail, err := handler.helmAppService.GetApplicationDetail(ctx, appIdentifier)
 	if err != nil {
 		apiError := clientErrors.ConvertToApiError(err)
 		if apiError != nil {
@@ -303,7 +305,7 @@ func (handler *K8sApplicationRestHandlerImpl) GetHostUrlsByBatch(w http.Response
 	}
 	//RBAC enforcer Ends
-	appDetail, err := handler.argoApplicationReadService.GetAppDetail(appIdentifier.AppName, appIdentifier.Namespace, appIdentifier.ClusterId)
+	appDetail, err := handler.argoApplicationReadService.GetAppDetailEA(ctx, appIdentifier.AppName, appIdentifier.Namespace, appIdentifier.ClusterId)
 	if err != nil {
 		apiError := clientErrors.ConvertToApiError(err)
 		if apiError != nil {
diff --git a/api/module/ModuleRestHandler.go b/api/module/ModuleRestHandler.go
index 5d950dd691..664cae0322 100644
--- a/api/module/ModuleRestHandler.go
+++ b/api/module/ModuleRestHandler.go
@@ -19,6 +19,7 @@ package module
 import (
 	"encoding/json"
 	"errors"
+	"github.com/devtron-labs/devtron/pkg/module/bean"
 	"net/http"
 
 	"github.com/devtron-labs/devtron/api/restHandler/common"
@@ -135,7 +136,7 @@ func (impl ModuleRestHandlerImpl) HandleModuleAction(w http.ResponseWriter, r *h
 	// decode request
 	decoder := json.NewDecoder(r.Body)
-	var moduleActionRequestDto *module.ModuleActionRequestDto
+	var moduleActionRequestDto *bean.ModuleActionRequestDto
 	err = decoder.Decode(&moduleActionRequestDto)
 	if err != nil {
 		impl.logger.Errorw("error in decoding request in HandleModuleAction", "err", err)
@@ -184,7 +185,7 @@ func (impl ModuleRestHandlerImpl) EnableModule(w http.ResponseWriter, r *http.Re
 	}
 	// decode request
 	decoder := json.NewDecoder(r.Body)
-	var moduleEnableRequestDto module.ModuleEnableRequestDto
+	var moduleEnableRequestDto bean.ModuleEnableRequestDto
 	err = decoder.Decode(&moduleEnableRequestDto)
 	if err != nil {
 		impl.logger.Errorw("error in decoding request in ModuleEnableRequestDto", "err", err)
diff --git a/api/module/wire_module.go b/api/module/wire_module.go
index ed572837a1..d2114f32eb 100644
--- a/api/module/wire_module.go
+++ b/api/module/wire_module.go
@@ -18,6 +18,8 @@ package module
 
 import (
 	"github.com/devtron-labs/devtron/pkg/module"
+	"github.com/devtron-labs/devtron/pkg/module/bean"
+	"github.com/devtron-labs/devtron/pkg/module/read"
 	moduleRepo "github.com/devtron-labs/devtron/pkg/module/repo"
 	moduleDataStore "github.com/devtron-labs/devtron/pkg/module/store"
 	"github.com/google/wire"
@@ -28,9 +30,11 @@ var ModuleWireSet = wire.NewSet(
 	wire.Bind(new(module.ModuleActionAuditLogRepository), new(*module.ModuleActionAuditLogRepositoryImpl)),
 	moduleRepo.NewModuleRepositoryImpl,
 	wire.Bind(new(moduleRepo.ModuleRepository), new(*moduleRepo.ModuleRepositoryImpl)),
+	read.NewModuleReadServiceImpl,
+	wire.Bind(new(read.ModuleReadService), new(*read.ModuleReadServiceImpl)),
 	moduleRepo.NewModuleResourceStatusRepositoryImpl,
 	wire.Bind(new(moduleRepo.ModuleResourceStatusRepository), new(*moduleRepo.ModuleResourceStatusRepositoryImpl)),
-	module.ParseModuleEnvConfig,
+	bean.ParseModuleEnvConfig,
 	moduleDataStore.InitModuleDataStore,
 	module.NewModuleServiceHelperImpl,
 	wire.Bind(new(module.ModuleServiceHelper), new(*module.ModuleServiceHelperImpl)),
diff --git a/api/restHandler/CommonRestHanlder.go b/api/restHandler/CommonRestHanlder.go
index e8e5d8afe9..5527aa2f4a 100644
--- a/api/restHandler/CommonRestHanlder.go
+++ b/api/restHandler/CommonRestHanlder.go
@@ -25,28 +25,29 @@ import (
 	"go.uber.org/zap"
 )
 
-type CommonRestHanlder interface {
+type CommonRestHandler interface {
 	GlobalChecklist(w http.ResponseWriter, r *http.Request)
+	EnvironmentVariableList(w http.ResponseWriter, r *http.Request)
 }
 
-type CommonRestHanlderImpl struct {
+type CommonRestHandlerImpl struct {
 	logger          *zap.SugaredLogger
 	userAuthService user.UserService
 	commonService   commonService.CommonService
 }
 
-func NewCommonRestHanlderImpl(
+func NewCommonRestHandlerImpl(
 	logger *zap.SugaredLogger,
 	userAuthService user.UserService,
-	commonService commonService.CommonService) *CommonRestHanlderImpl {
-	return &CommonRestHanlderImpl{
+	commonService commonService.CommonService) *CommonRestHandlerImpl {
+	return &CommonRestHandlerImpl{
 		logger:          logger,
 		userAuthService: userAuthService,
 		commonService:   commonService,
 	}
 }
 
-func (impl CommonRestHanlderImpl) GlobalChecklist(w http.ResponseWriter, r *http.Request) {
+func (impl CommonRestHandlerImpl) GlobalChecklist(w http.ResponseWriter, r *http.Request) {
 	userId, err := impl.userAuthService.GetLoggedInUser(r)
 	if userId == 0 || err != nil {
 		common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized)
@@ -61,3 +62,19 @@ func (impl CommonRestHanlderImpl) GlobalChecklist(w http.ResponseWriter, r *http
 	common.WriteJsonResp(w, err, res, http.StatusOK)
 }
+
+func (impl CommonRestHandlerImpl) EnvironmentVariableList(w http.ResponseWriter, r *http.Request) {
+	userId, err := impl.userAuthService.GetLoggedInUser(r)
+	if userId == 0 || err != nil {
+		common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized)
+		return
+	}
+	// TODO: ADD RBAC (if required)
+	res, err := impl.commonService.EnvironmentVariableList()
+	if err != nil {
+		impl.logger.Errorw("service err, EnvironmentVariableList", "err", err)
+		common.WriteJsonResp(w, err, nil, http.StatusInternalServerError)
+		return
+	}
+	common.WriteJsonResp(w, err, res, http.StatusOK)
+}
diff --git a/api/restHandler/CoreAppRestHandler.go b/api/restHandler/CoreAppRestHandler.go
index 3fe96406c5..bb8581dcfc 100644
--- a/api/restHandler/CoreAppRestHandler.go
+++ b/api/restHandler/CoreAppRestHandler.go
@@ -29,6 +29,8 @@ import (
 	"github.com/devtron-labs/devtron/pkg/build/git/gitProvider"
 	"github.com/devtron-labs/devtron/pkg/build/git/gitProvider/read"
 	pipelineBean "github.com/devtron-labs/devtron/pkg/build/pipeline/bean"
+	bean3 "github.com/devtron-labs/devtron/pkg/chart/bean"
+	read5 "github.com/devtron-labs/devtron/pkg/chart/read"
 	"github.com/devtron-labs/devtron/pkg/cluster/environment/repository"
 	read3 "github.com/devtron-labs/devtron/pkg/team/read"
 	"net/http"
@@ -101,6 +103,7 @@ type CoreAppRestHandlerImpl struct {
 	pipelineStageService pipeline.PipelineStageService
 	ciPipelineRepository pipelineConfig.CiPipelineRepository
 	teamReadService      read3.TeamReadService
+	chartReadService     read5.ChartReadService
 }
 
 func NewCoreAppRestHandlerImpl(logger *zap.SugaredLogger, userAuthService user.UserService, validator *validator.Validate, enforcerUtil rbac.EnforcerUtil,
@@ -112,7 +115,8 @@ func NewCoreAppRestHandlerImpl(logger *zap.SugaredLogger, userAuthService user.U
 	pipelineStageService pipeline.PipelineStageService, ciPipelineRepository pipelineConfig.CiPipelineRepository,
 	gitProviderReadService read.GitProviderReadService,
 	gitMaterialReadService read2.GitMaterialReadService,
-	teamReadService read3.TeamReadService) *CoreAppRestHandlerImpl {
+	teamReadService read3.TeamReadService,
+	chartReadService read5.ChartReadService) *CoreAppRestHandlerImpl {
 	handler := &CoreAppRestHandlerImpl{
 		logger:          logger,
 		userAuthService: userAuthService,
@@ -136,6 +140,7 @@ func NewCoreAppRestHandlerImpl(logger *zap.SugaredLogger, userAuthService user.U
 		pipelineStageService: pipelineStageService,
 		ciPipelineRepository: ciPipelineRepository,
 		teamReadService:      teamReadService,
+		chartReadService:     chartReadService,
 	}
 	return handler
 }
@@ -551,7 +556,7 @@ func (handler CoreAppRestHandlerImpl) buildAppEnvironmentDeploymentTemplate(appI
 		return nil, err, http.StatusBadRequest
 	}
 
-	appDeploymentTemplate, err := handler.chartService.FindLatestChartForAppByAppId(appId)
+	appDeploymentTemplate, err := handler.chartReadService.FindLatestChartForAppByAppId(appId)
 	if err != nil {
 		if err != pg.ErrNoRows {
 			handler.logger.Errorw("service err, GetDeploymentTemplate in GetAppAllDetail", "err", err, "appId", appId, "envId", envId)
@@ -1365,7 +1370,7 @@ func (handler CoreAppRestHandlerImpl) createDockerConfig(appId int, dockerConfig
 func (handler CoreAppRestHandlerImpl) createDeploymentTemplate(ctx context.Context, appId int, deploymentTemplate *appBean.DeploymentTemplate, userId int32) (error, int) {
 	handler.logger.Infow("Create App - creating deployment template", "appId", appId, "DeploymentStrategy", deploymentTemplate)
 
-	createDeploymentTemplateRequest := chart.TemplateRequest{
+	createDeploymentTemplateRequest := bean3.TemplateRequest{
 		AppId:               appId,
 		ChartRefId:          deploymentTemplate.ChartRefId,
 		IsAppMetricsEnabled: deploymentTemplate.ShowAppMetrics,
@@ -1839,7 +1844,7 @@ func (handler CoreAppRestHandlerImpl) createEnvDeploymentTemplate(appId int, use
 	chartEntry, err := handler.chartRepo.FindChartByAppIdAndRefId(appId, chartRefId)
 	if err != nil {
 		if pg.ErrNoRows == err {
-			templateRequest := chart.TemplateRequest{
+			templateRequest := bean3.TemplateRequest{
 				AppId:          appId,
 				ChartRefId:     chartRefId,
 				ValuesOverride: []byte("{}"),
diff --git a/api/restHandler/GitOpsConfigRestHandler.go b/api/restHandler/GitOpsConfigRestHandler.go
index 3c0c4380fe..0511b8204f 100644
--- a/api/restHandler/GitOpsConfigRestHandler.go
+++ b/api/restHandler/GitOpsConfigRestHandler.go
@@ -21,6 +21,9 @@ import (
 	"errors"
 	bean2 "github.com/devtron-labs/devtron/api/bean/gitOps"
 	"github.com/devtron-labs/devtron/api/util"
+	moduleBean "github.com/devtron-labs/devtron/pkg/module/bean"
+	moduleRead "github.com/devtron-labs/devtron/pkg/module/read"
+	moduleErr "github.com/devtron-labs/devtron/pkg/module/read/error"
 	"net/http"
 	"strconv"
@@ -46,6 +49,7 @@ type GitOpsConfigRestHandler interface {
 
 type GitOpsConfigRestHandlerImpl struct {
 	logger              *zap.SugaredLogger
+	moduleReadService   moduleRead.ModuleReadService
 	gitOpsConfigService gitops.GitOpsConfigService
 	userAuthService     user.UserService
 	validator           *validator.Validate
@@ -55,10 +59,12 @@ type GitOpsConfigRestHandlerImpl struct {
 
 func NewGitOpsConfigRestHandlerImpl(
 	logger *zap.SugaredLogger,
+	moduleReadService moduleRead.ModuleReadService,
 	gitOpsConfigService gitops.GitOpsConfigService,
 	userAuthService user.UserService,
 	validator *validator.Validate, enforcer casbin.Enforcer, teamService team.TeamService) *GitOpsConfigRestHandlerImpl {
 	return &GitOpsConfigRestHandlerImpl{
 		logger:              logger,
+		moduleReadService:   moduleReadService,
 		gitOpsConfigService: gitOpsConfigService,
 		userAuthService:     userAuthService,
 		validator:           validator,
@@ -302,6 +308,12 @@ func (impl GitOpsConfigRestHandlerImpl) GitOpsValidator(w http.ResponseWriter, r
 		common.WriteJsonResp(w, err, nil, http.StatusBadRequest)
 		return
 	}
-	detailedErrorGitOpsConfigResponse := impl.gitOpsConfigService.GitOpsValidateDryRun(&bean)
+	argoModule, err := impl.moduleReadService.GetModuleInfoByName(moduleBean.ModuleNameArgoCd)
+	if err != nil && !errors.Is(err, moduleErr.ModuleNotFoundError) {
+		impl.logger.Errorw("error in getting argo module", "error", err)
+		common.WriteJsonResp(w, err, nil, http.StatusInternalServerError)
+		return
+	}
+	detailedErrorGitOpsConfigResponse := impl.gitOpsConfigService.GitOpsValidateDryRun(argoModule.IsInstalled(), &bean)
 	common.WriteJsonResp(w, nil, detailedErrorGitOpsConfigResponse, http.StatusOK)
 }
diff --git a/api/restHandler/app/configDiff/DeploymentConfigurationRestHandler.go b/api/restHandler/app/configDiff/DeploymentConfigurationRestHandler.go
index df88a3ea20..b7030aa496 100644
--- a/api/restHandler/app/configDiff/DeploymentConfigurationRestHandler.go
+++ b/api/restHandler/app/configDiff/DeploymentConfigurationRestHandler.go
@@ -14,7 +14,6 @@ import (
 	"github.com/gorilla/mux"
 	"github.com/gorilla/schema"
 	"go.uber.org/zap"
-	"gopkg.in/go-playground/validator.v9"
 	"net/http"
 	"time"
 )
@@ -28,7 +27,6 @@ type DeploymentConfigurationRestHandler interface {
 type DeploymentConfigurationRestHandlerImpl struct {
 	logger                         *zap.SugaredLogger
 	userAuthService                user.UserService
-	validator                      *validator.Validate
 	enforcerUtil                   rbac.EnforcerUtil
 	deploymentConfigurationService configDiff.DeploymentConfigurationService
 	enforcer                       casbin.Enforcer
diff --git a/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go b/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go
index 6b12a03169..1c53a8694a 100644
--- a/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go
+++ b/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go
@@ -21,6 +21,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	bean3 "github.com/devtron-labs/devtron/pkg/chart/bean"
 	devtronAppGitOpConfigBean "github.com/devtron-labs/devtron/pkg/chart/gitOpsConfig/bean"
 	chartRefBean "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/bean"
 	"github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository"
@@ -37,7 +38,6 @@ import (
 	"github.com/devtron-labs/devtron/internal/util"
 	"github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin"
 	"github.com/devtron-labs/devtron/pkg/bean"
-	"github.com/devtron-labs/devtron/pkg/chart"
 	"github.com/devtron-labs/devtron/pkg/generateManifest"
 	"github.com/devtron-labs/devtron/pkg/pipeline"
 	pipelineBean "github.com/devtron-labs/devtron/pkg/pipeline/bean"
@@ -80,6 +80,7 @@ type DevtronAppDeploymentRestHandler interface {
 	GetCdPipelinesByEnvironmentMin(w http.ResponseWriter, r *http.Request)
 	ChangeChartRef(w http.ResponseWriter, r *http.Request)
+	ValidateArgoCDAppLinkRequest(w http.ResponseWriter, r *http.Request)
 }
 
 type DevtronAppDeploymentConfigRestHandler interface {
@@ -125,7 +126,7 @@ func (handler *PipelineConfigRestHandlerImpl) ConfigureDeploymentTemplateForApp(
 		common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized)
 		return
 	}
-	var templateRequest chart.TemplateRequest
+	var templateRequest bean3.TemplateRequest
 	err = decoder.Decode(&templateRequest)
 	templateRequest.UserId = userId
 	if err != nil {
@@ -201,11 +202,6 @@ func (handler *PipelineConfigRestHandlerImpl) CreateCdPipeline(w http.ResponseWr
 	handler.Logger.Infow("request payload, CreateCdPipeline", "payload", cdPipeline)
 	userUploaded, err := handler.chartService.CheckIfChartRefUserUploadedByAppId(cdPipeline.AppId)
 	if !userUploaded {
-		for i, p := range cdPipeline.Pipelines {
-			if len(p.ReleaseMode) == 0 {
-				cdPipeline.Pipelines[i].ReleaseMode = util.PIPELINE_RELEASE_MODE_CREATE
-			}
-		}
 		err = handler.validator.Struct(cdPipeline)
 		if err != nil {
 			handler.Logger.Errorw("validation err, CreateCdPipeline", "err", err, "payload", cdPipeline)
@@ -233,6 +229,15 @@ func (handler *PipelineConfigRestHandlerImpl) CreateCdPipeline(w http.ResponseWr
 	}
 	ok := true
 	for _, deploymentPipeline := range cdPipeline.Pipelines {
+
+		if deploymentPipeline.IsLinkedRelease() {
+			// only a super admin is allowed to link a pipeline to an external helm release / ArgoCD application
+			if ok := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionCreate, "*"); !ok {
+				common.WriteJsonResp(w, errors.New("unauthorized User"), nil, http.StatusForbidden)
+				return
+			}
+		}
+
 		// handling case of change of source from CI_PIPELINE to external-ci type (other change of type any -> any has been handled in ci-pipeline/patch api)
 		if deploymentPipeline.IsSwitchCiPipelineRequest() {
 			cdPipelines, err := handler.getCdPipelinesForCdPatchRbac(deploymentPipeline)
@@ -521,7 +526,7 @@ func (handler *PipelineConfigRestHandlerImpl) ChangeChartRef(w http.ResponseWrit
 		return
 	}
 	decoder := json.NewDecoder(r.Body)
-	var request chart.ChartRefChangeRequest
+	var request bean3.ChartRefChangeRequest
 	err = decoder.Decode(&request)
 	if err != nil || request.EnvId == 0 || request.TargetChartRefId == 0 || request.AppId == 0 {
 		handler.Logger.Errorw("request err, ChangeChartRef", "err", err, "payload", request)
@@ -626,7 +631,7 @@ func (handler *PipelineConfigRestHandlerImpl) ChangeChartRef(w http.ResponseWrit
 	if envConfigProperties.AppMetrics != nil {
 		appMetrics = envMetrics
 	}
-	templateRequest := chart.TemplateRequest{
+	templateRequest := bean3.TemplateRequest{
 		AppId:          request.AppId,
 		ChartRefId:     request.TargetChartRefId,
 		ValuesOverride: []byte("{}"),
@@ -728,7 +733,7 @@ func (handler *PipelineConfigRestHandlerImpl) EnvConfigOverrideCreate(w http.Res
 	if envConfigProperties.AppMetrics != nil {
 		appMetrics = *envConfigProperties.AppMetrics
 	}
-	templateRequest := chart.TemplateRequest{
+	templateRequest := bean3.TemplateRequest{
 		AppId:          appId,
 		ChartRefId:     envConfigProperties.ChartRefId,
 		ValuesOverride: []byte("{}"),
@@ -1046,7 +1051,7 @@ func (handler *PipelineConfigRestHandlerImpl) GetDeploymentTemplate(w http.Respo
 		handler.Logger.Errorw("err in getting schema and readme, GetDeploymentTemplate", "err", err, "appId", appId, "chartRefId", chartRefId)
 	}
 
-	template, err := handler.chartService.FindLatestChartForAppByAppId(appId)
+	template, err := handler.chartReadService.FindLatestChartForAppByAppId(appId)
 	if err != nil && pg.ErrNoRows != err {
 		handler.Logger.Errorw("service err, GetDeploymentTemplate", "err", err, "appId", appId, "chartRefId", chartRefId)
 		common.WriteJsonResp(w, err, nil, http.StatusInternalServerError)
@@ -1423,7 +1428,7 @@ func (handler *PipelineConfigRestHandlerImpl) UpdateAppOverride(w http.ResponseW
 		return
 	}
 
-	var templateRequest chart.TemplateRequest
+	var templateRequest bean3.TemplateRequest
 	err = decoder.Decode(&templateRequest)
 	templateRequest.UserId = userId
 	if err != nil {
@@ -1605,7 +1610,7 @@ func (handler *PipelineConfigRestHandlerImpl) EnvConfigOverrideReset(w http.Resp
 		common.WriteJsonResp(w, fmt.Errorf("unauthorized user"), "Unauthorized User", http.StatusForbidden)
 		return
 	}
-	isSuccess, err := handler.propertiesConfigService.ResetEnvironmentProperties(id)
+	isSuccess, err := handler.propertiesConfigService.ResetEnvironmentProperties(id, userId)
 	if err != nil {
 		handler.Logger.Errorw("service err, EnvConfigOverrideReset", "err", err, "appId", appId, "environmentId", environmentId)
 		common.WriteJsonResp(w, err, nil, http.StatusInternalServerError)
@@ -2221,7 +2226,7 @@ func (handler *PipelineConfigRestHandlerImpl) UpgradeForAllApps(w http.ResponseW
 	}
 	decoder := json.NewDecoder(r.Body)
-	var chartUpgradeRequest chart.ChartUpgradeRequest
+	var chartUpgradeRequest bean3.ChartUpgradeRequest
 	err = decoder.Decode(&chartUpgradeRequest)
 	if err != nil {
 		handler.Logger.Errorw("request err, UpgradeForAllApps", "err", err, "payload", chartUpgradeRequest)
@@ -2545,3 +2550,31 @@ func (handler *PipelineConfigRestHandlerImpl) getCdPipelinesForCdPatchRbac(deplo
 	}
 	return handler.pipelineRepository.FindByIdsIn(cdPipelineIds)
 }
+
+func (handler *PipelineConfigRestHandlerImpl) ValidateArgoCDAppLinkRequest(w http.ResponseWriter, r *http.Request) {
+	decoder := json.NewDecoder(r.Body)
+	userId, err := handler.userAuthService.GetLoggedInUser(r)
+	if userId == 0 || err != nil {
+		common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized)
+		return
+	}
+	var request pipelineBean.MigrateReleaseValidationRequest
+	err = decoder.Decode(&request)
+	if err != nil {
+		handler.Logger.Errorw("request err, ValidateArgoCDAppLinkRequest", "err", err, "payload", request)
+		common.WriteJsonResp(w, err, nil, http.StatusBadRequest)
+		return
+	}
+	handler.Logger.Debugw("request payload, ValidateArgoCDAppLinkRequest", "payload", request)
+	token := r.Header.Get("token")
+	if ok := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionUpdate, "*"); !ok {
+		common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden)
+		return
+	}
+	if request.DeploymentAppType == util.PIPELINE_DEPLOYMENT_TYPE_ACD {
+		response := handler.pipelineBuilder.ValidateLinkExternalArgoCDRequest(&request)
+		common.WriteJsonResp(w, err, response, http.StatusOK)
+		return
+	}
+	// TODO: handle helm deployment types
+	common.WriteJsonResp(w, errors.New("invalid deployment app type in request"), nil, http.StatusBadRequest)
+}
diff --git a/api/restHandler/app/pipeline/configure/PipelineConfigRestHandler.go b/api/restHandler/app/pipeline/configure/PipelineConfigRestHandler.go
index 5d3bc11ebd..10f899556c 100644
--- a/api/restHandler/app/pipeline/configure/PipelineConfigRestHandler.go
+++ b/api/restHandler/app/pipeline/configure/PipelineConfigRestHandler.go
@@ -27,6 +27,8 @@ import (
 	gitProviderRead "github.com/devtron-labs/devtron/pkg/build/git/gitProvider/read"
 	bean3 "github.com/devtron-labs/devtron/pkg/build/pipeline/bean"
 	"github.com/devtron-labs/devtron/pkg/chart/gitOpsConfig"
+	read5 "github.com/devtron-labs/devtron/pkg/chart/read"
+	repository2 "github.com/devtron-labs/devtron/pkg/cluster/environment/repository"
 	"github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics"
 	"github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate"
 	"github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef"
@@ -134,6 +136,8 @@ type PipelineConfigRestHandlerImpl struct {
 	chartRefService          chartRef.ChartRefService
 	ciCdPipelineOrchestrator pipeline.CiCdPipelineOrchestrator
 	teamReadService          read3.TeamReadService
+	environmentRepository    repository2.EnvironmentRepository
+	chartReadService         read5.ChartReadService
 }
 
 func NewPipelineRestHandlerImpl(pipelineBuilder pipeline.PipelineBuilder, Logger *zap.SugaredLogger,
@@ -165,7 +169,9 @@ func NewPipelineRestHandlerImpl(pipelineBuilder pipeline.PipelineBuilder, Logger
 	chartRefService chartRef.ChartRefService,
 	ciCdPipelineOrchestrator pipeline.CiCdPipelineOrchestrator,
 	gitProviderReadService gitProviderRead.GitProviderReadService,
-	teamReadService read3.TeamReadService) *PipelineConfigRestHandlerImpl {
+	teamReadService read3.TeamReadService,
+	environmentRepository repository2.EnvironmentRepository,
+	chartReadService read5.ChartReadService) *PipelineConfigRestHandlerImpl {
 	envConfig := &PipelineRestHandlerEnvConfig{}
 	err := env.Parse(envConfig)
 	if err != nil {
@@ -205,6 +211,8 @@ func NewPipelineRestHandlerImpl(pipelineBuilder pipeline.PipelineBuilder, Logger
 		ciCdPipelineOrchestrator: ciCdPipelineOrchestrator,
 		gitProviderReadService:   gitProviderReadService,
 		teamReadService:          teamReadService,
+		environmentRepository:    environmentRepository,
+		chartReadService:         chartReadService,
 	}
 }
diff --git a/api/restHandler/app/workflow/AppWorkflowRestHandler.go b/api/restHandler/app/workflow/AppWorkflowRestHandler.go
index 4ef1bba590..79c62e025a 100644
--- a/api/restHandler/app/workflow/AppWorkflowRestHandler.go
+++ b/api/restHandler/app/workflow/AppWorkflowRestHandler.go
@@ -214,7 +214,7 @@ func (impl AppWorkflowRestHandlerImpl) FindAppWorkflow(w http.ResponseWriter, r
 		common.WriteJsonResp(w, err, "unauthorized user", http.StatusForbidden)
 		return
 	}
-	//RBAC enforcer Ends
+	// RBAC enforcer Ends
 	workflows := make(map[string]interface{})
 	workflowsList, err := impl.appWorkflowService.FindAppWorkflows(appId)
 	if err != nil {
diff --git a/api/router/CommonRouter.go b/api/router/CommonRouter.go
index 451cbdbf8b..07ba7547a5 100644
--- a/api/router/CommonRouter.go
+++ b/api/router/CommonRouter.go
@@ -25,14 +25,17 @@ type CommonRouter interface {
 	InitCommonRouter(router *mux.Router)
 }
 
 type CommonRouterImpl struct {
-	commonRestHandler restHandler.CommonRestHanlder
+	commonRestHandler restHandler.CommonRestHandler
 }
 
-func NewCommonRouterImpl(commonRestHandler restHandler.CommonRestHanlder) *CommonRouterImpl {
+func NewCommonRouterImpl(commonRestHandler restHandler.CommonRestHandler) *CommonRouterImpl {
 	return &CommonRouterImpl{commonRestHandler: commonRestHandler}
 }
 
 func (impl CommonRouterImpl) InitCommonRouter(router *mux.Router) {
 	router.Path("/checklist").
 		HandlerFunc(impl.commonRestHandler.GlobalChecklist).
 		Methods("GET")
+	router.Path("/environment-variables").
+		HandlerFunc(impl.commonRestHandler.EnvironmentVariableList).
+		Methods("GET")
 }
diff --git a/api/router/app/pipeline/configure/PipelineConfigRouter.go b/api/router/app/pipeline/configure/PipelineConfigRouter.go
index 8f9059c662..ba894f0778 100644
--- a/api/router/app/pipeline/configure/PipelineConfigRouter.go
+++ b/api/router/app/pipeline/configure/PipelineConfigRouter.go
@@ -69,6 +69,7 @@ func (router PipelineConfigRouterImpl) InitPipelineConfigRouter(configRouter *mu
 	configRouter.Path("/cd-pipeline/patch/deployment/trigger").HandlerFunc(router.restHandler.HandleTriggerDeploymentAfterTypeChange).Methods("POST")
 	configRouter.Path("/cd-pipeline/{appId}").HandlerFunc(router.restHandler.GetCdPipelines).Methods("GET")
 	configRouter.Path("/cd-pipeline/{appId}/env/{envId}").HandlerFunc(router.restHandler.GetCdPipelinesForAppAndEnv).Methods("GET")
+	configRouter.Path("/cd-pipeline/validate-link-request").HandlerFunc(router.restHandler.ValidateArgoCDAppLinkRequest).Methods("POST")
 	//save environment specific override
 	configRouter.Path("/env/{appId}/{environmentId}").HandlerFunc(router.restHandler.EnvConfigOverrideCreate).Methods("POST")
 	configRouter.Path("/env/patch").HandlerFunc(router.restHandler.ChangeChartRef).Methods("PATCH")
diff --git a/client/argocdServer/ArgoClientWrapperService.go b/client/argocdServer/ArgoClientWrapperService.go
index 54c7003ad7..4b028eede5 100644
--- a/client/argocdServer/ArgoClientWrapperService.go
+++ b/client/argocdServer/ArgoClientWrapperService.go
@@ -86,20 +86,24 @@ type ApplicationClientWrapper interface {
 	// GetArgoAppByName fetches an argoCd app by its name
 	GetArgoAppByName(ctx context.Context, appName string) (*v1alpha1.Application, error)
 
+	GetArgoAppByNameWithK8sClient(ctx context.Context, clusterId int, namespace, appName string) (*v1alpha1.Application, error)
+
+	DeleteArgoAppWithK8sClient(ctx context.Context, clusterId int, namespace, appName string, cascadeDelete bool) error
+
 	// SyncArgoCDApplicationIfNeededAndRefresh - if ARGO_AUTO_SYNC_ENABLED=true, app will be refreshed to initiate refresh at argoCD side or else it will be synced and refreshed
-	SyncArgoCDApplicationIfNeededAndRefresh(context context.Context, argoAppName string) error
+	SyncArgoCDApplicationIfNeededAndRefresh(ctx context.Context, argoAppName, targetRevision string) error
 
 	// UpdateArgoCDSyncModeIfNeeded - if ARGO_AUTO_SYNC_ENABLED=true and app is in manual sync mode or vice versa update app
 	UpdateArgoCDSyncModeIfNeeded(ctx context.Context, argoApplication *v1alpha1.Application) (err error)
 
 	// RegisterGitOpsRepoInArgoWithRetry - register a repository in argo-cd with retry mechanism
-	RegisterGitOpsRepoInArgoWithRetry(ctx context.Context, gitOpsRepoUrl string, userId int32) error
+	RegisterGitOpsRepoInArgoWithRetry(ctx context.Context, gitOpsRepoUrl, targetRevision string, userId int32) error
 
 	// PatchArgoCdApp performs a patch operation on an argoCd app
 	PatchArgoCdApp(ctx context.Context, dto *bean.ArgoCdAppPatchReqDto) error
 
 	// IsArgoAppPatchRequired decides whether the v1alpha1.ApplicationSource requires to be updated
-	IsArgoAppPatchRequired(argoAppSpec *v1alpha1.ApplicationSource, currentGitRepoUrl, currentChartPath string) bool
+	IsArgoAppPatchRequired(argoAppSpec *v1alpha1.ApplicationSource, currentGitRepoUrl, currentTargetRevision, currentChartPath string) bool
 
 	// GetGitOpsRepoNameForApplication returns the GitOps repository name configured for the argoCd app
 	GetGitOpsRepoNameForApplication(ctx context.Context, appName string) (gitOpsRepoName string, err error)
@@ -108,7 +112,7 @@ type ApplicationClientWrapper interface {
 }
 
 type RepositoryClientWrapper interface {
-	RegisterGitOpsRepoInArgoWithRetry(ctx context.Context, gitOpsRepoUrl string, userId int32) error
+	RegisterGitOpsRepoInArgoWithRetry(ctx context.Context, gitOpsRepoUrl, targetRevision string, userId int32) error
 }
 
 type RepoCredsClientWrapper interface {
@@ -150,6 +154,7 @@ type ArgoClientWrapperServiceImpl struct {
 	gitOperationService     git.GitOperationService
 	asyncRunnable           *async.Runnable
 	acdConfigGetter         config2.ArgoCDConfigGetter
+	argoK8sClient           ArgoK8sClient
 	*ArgoClientWrapperServiceEAImpl
 }
@@ -164,6 +169,7 @@ func NewArgoClientWrapperServiceImpl(
 	gitOperationService git.GitOperationService,
 	asyncRunnable *async.Runnable,
 	acdConfigGetter config2.ArgoCDConfigGetter,
 	ArgoClientWrapperServiceEAImpl *ArgoClientWrapperServiceEAImpl,
+	argoK8sClient ArgoK8sClient,
 ) *ArgoClientWrapperServiceImpl {
 	return &ArgoClientWrapperServiceImpl{
 		acdApplicationClient: acdClient,
@@ -178,6 +184,7 @@ func NewArgoClientWrapperServiceImpl(
 		asyncRunnable:                  asyncRunnable,
 		acdConfigGetter:                acdConfigGetter,
 		ArgoClientWrapperServiceEAImpl: ArgoClientWrapperServiceEAImpl,
+		argoK8sClient:                  argoK8sClient,
 	}
 }
@@ -235,7 +242,7 @@ func (impl *ArgoClientWrapperServiceImpl) DeleteArgoApp(ctx context.Context, app
 	return impl.acdApplicationClient.Delete(ctx, grpcConfig, req)
 }
 
-func (impl *ArgoClientWrapperServiceImpl) SyncArgoCDApplicationIfNeededAndRefresh(ctx context.Context, argoAppName string) error {
+func (impl *ArgoClientWrapperServiceImpl) SyncArgoCDApplicationIfNeededAndRefresh(ctx context.Context, argoAppName, targetRevision string) error {
 	newCtx, span := otel.Tracer("orchestrator").Start(ctx, "ArgoClientWrapperServiceImpl.SyncArgoCDApplicationIfNeededAndRefresh")
 	defer span.End()
 	impl.logger.Info("ArgoCd manual sync for app started", "argoAppName", argoAppName)
@@ -249,10 +257,9 @@ func (impl *ArgoClientWrapperServiceImpl) SyncArgoCDApplicationIfNeededAndRefres
 	if impl.ACDConfig.IsManualSyncEnabled() {
 		impl.logger.Debugw("syncing ArgoCd app as manual sync is enabled", "argoAppName", argoAppName)
-		revision := "master"
 		pruneResources := true
 		_, syncErr := impl.acdApplicationClient.Sync(newCtx, grpcConfig, &application2.ApplicationSyncRequest{Name: &argoAppName,
-			Revision: &revision,
+			Revision: &targetRevision,
 			Prune:    &pruneResources,
 		})
 		if syncErr != nil {
@@ -268,7 +275,7 @@ func (impl *ArgoClientWrapperServiceImpl) SyncArgoCDApplicationIfNeededAndRefres
 				return fmt.Errorf("error in terminating existing sync, err: %w", terminationErr)
 			}
 			_, syncErr = impl.acdApplicationClient.Sync(newCtx, grpcConfig, &application2.ApplicationSyncRequest{Name: &argoAppName,
-				Revision: &revision,
+				Revision: &targetRevision,
 				Prune:    &pruneResources,
 				RetryStrategy: &v1alpha1.RetryStrategy{
 					Limit: 1,
@@ -334,8 +341,7 @@ func (impl *ArgoClientWrapperServiceImpl) UpdateArgoCDSyncModeIfNeeded(ctx conte
 	return nil
 }
 
-func (impl *ArgoClientWrapperServiceImpl) RegisterGitOpsRepoInArgoWithRetry(ctx context.Context, gitOpsRepoUrl string, userId int32) error {
-
+func (impl *ArgoClientWrapperServiceImpl) RegisterGitOpsRepoInArgoWithRetry(ctx context.Context, gitOpsRepoUrl, targetRevision string, userId int32) error {
 	grpcConfig, err := impl.acdConfigGetter.GetGRPCConfig()
 	if err != nil {
 		impl.logger.Errorw("error in getting grpc config", "err", err)
@@ -352,7 +358,7 @@ func (impl *ArgoClientWrapperServiceImpl) RegisterGitOpsRepoInArgoWithRetry(ctx
 		impl.logger)
 	if argoCdErr != nil {
 		impl.logger.Errorw("error in registering GitOps repository", "repoName", gitOpsRepoUrl, "err", argoCdErr)
-		return impl.handleArgoRepoCreationError(ctx, argoCdErr, grpcConfig, gitOpsRepoUrl, userId)
+		return impl.handleArgoRepoCreationError(ctx, argoCdErr, grpcConfig, gitOpsRepoUrl, targetRevision, userId)
 	}
 	impl.logger.Infow("gitOps repo registered in argo", "repoName", gitOpsRepoUrl)
 	return nil
@@ -401,10 +407,49 @@ func (impl *ArgoClientWrapperServiceImpl) GetArgoAppByName(ctx context.Context,
 	return argoApplication, nil
 }
 
+func (impl *ArgoClientWrapperServiceImpl) GetArgoAppByNameWithK8sClient(ctx context.Context, clusterId int, namespace, appName string) (*v1alpha1.Application, error) {
+	k8sConfig, err := impl.acdConfigGetter.GetK8sConfigWithClusterIdAndNamespace(clusterId, namespace)
+	if err != nil {
+		impl.logger.Errorw("error in getting k8s config", "err", err)
+		return nil, err
+	}
+	argoApplication, err := impl.argoK8sClient.GetArgoApplication(k8sConfig, appName)
+	if err != nil {
+		impl.logger.Errorw("error in getting argo app by name", "app", appName, "err", err)
+		return nil, err
+	}
+	application, err := GetAppObject(argoApplication)
+	if err != nil {
+		impl.logger.Errorw("error in getting app object", "deploymentAppName", appName, "err", err)
+		return nil, err
+	}
+	return application, nil
+}
+
+func (impl *ArgoClientWrapperServiceImpl) DeleteArgoAppWithK8sClient(ctx context.Context, clusterId int, namespace, appName string, cascadeDelete bool) error {
+	k8sConfig, err := impl.acdConfigGetter.GetK8sConfigWithClusterIdAndNamespace(clusterId, namespace)
+	if err != nil {
+		impl.logger.Errorw("error in getting k8s config", "err", err)
+		return err
+	}
+	err = impl.argoK8sClient.DeleteArgoApplication(ctx, k8sConfig, appName, cascadeDelete)
+	if err != nil {
+		impl.logger.Errorw("error in deleting argo app by name", "app", appName, "err", err)
+		return err
+	}
+	return nil
+}
+
+func (impl *ArgoClientWrapperServiceImpl) IsArgoAppPatchRequired(argoAppSpec *v1alpha1.ApplicationSource, currentGitRepoUrl, currentTargetRevision, currentChartPath string) bool {
+	if argoAppSpec == nil {
+		// if the argo app spec is nil there is nothing to patch;
+		// this means the argo app object is in a corrupted state
+		impl.logger.Warnw("received argo app spec is nil, skipping patch request...")
+		return false
+	}
 	return (len(currentGitRepoUrl) != 0 && argoAppSpec.RepoURL != currentGitRepoUrl) ||
 		argoAppSpec.Path != currentChartPath ||
-		argoAppSpec.TargetRevision != bean.TargetRevisionMaster
+		argoAppSpec.TargetRevision != currentTargetRevision
 }
 
 func (impl *ArgoClientWrapperServiceImpl) PatchArgoCdApp(ctx context.Context, dto *bean.ArgoCdAppPatchReqDto) error {
@@ -489,7 +534,7 @@ func (impl *ArgoClientWrapperServiceImpl) isRetryableArgoRepoCreationError(argoC
 }
 
 // handleArgoRepoCreationError - manages the error thrown while performing createRepoInArgoCd
-func (impl *ArgoClientWrapperServiceImpl) handleArgoRepoCreationError(ctx context.Context, argoCdErr error, grpcConfig *bean.ArgoGRPCConfig, gitOpsRepoUrl string, userId int32) error {
+func (impl *ArgoClientWrapperServiceImpl) handleArgoRepoCreationError(ctx context.Context, argoCdErr error, grpcConfig *bean.ArgoGRPCConfig, gitOpsRepoUrl string, targetRevision string, userId int32) error {
 	emptyRepoErrorMessages := bean.EmptyRepoErrorList
 	isEmptyRepoError := false
 	for _, errMsg := range emptyRepoErrorMessages {
@@ -500,7 +545,7 @@ func (impl *ArgoClientWrapperServiceImpl) handleArgoRepoCreationError(ctx contex
 	if isEmptyRepoError {
 		// found empty repository, create some file in repository
 		gitOpsRepoName := impl.gitOpsConfigReadService.GetGitOpsRepoNameFromUrl(gitOpsRepoUrl)
-		err := impl.gitOperationService.CreateReadmeInGitRepo(ctx, gitOpsRepoName, userId)
+		err := impl.gitOperationService.CreateReadmeInGitRepo(ctx, gitOpsRepoName, targetRevision, userId)
 		if err != nil {
 			impl.logger.Errorw("error in creating file in git repo", "err", err)
 			return err
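The recurring theme in ArgoClientWrapperService.go above is that the GitOps target revision is now threaded through from callers instead of being hardcoded to "master". An illustrative caller, assuming only the ApplicationClientWrapper interface and the bean.TargetRevisionMaster constant from this diff (the function and its fallback policy are hypothetical):

	func syncWithRevision(ctx context.Context, client argocdServer.ApplicationClientWrapper, argoAppName, targetRevision string) error {
		if len(targetRevision) == 0 {
			// hypothetical fallback; the diff only defines the constant
			targetRevision = bean.TargetRevisionMaster
		}
		return client.SyncArgoCDApplicationIfNeededAndRefresh(ctx, argoAppName, targetRevision)
	}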
diff --git a/client/argocdServer/ArgoClientWrapperServiceEA.go b/client/argocdServer/ArgoClientWrapperServiceEA.go
index b272649ff4..f4a1290fff 100644
--- a/client/argocdServer/ArgoClientWrapperServiceEA.go
+++ b/client/argocdServer/ArgoClientWrapperServiceEA.go
@@ -58,7 +58,11 @@ func (impl *ArgoClientWrapperServiceEAImpl) DeleteArgoApp(ctx context.Context, a
 	return nil, nil
 }
 
-func (impl *ArgoClientWrapperServiceEAImpl) SyncArgoCDApplicationIfNeededAndRefresh(ctx context.Context, argoAppName string) error {
+func (impl *ArgoClientWrapperServiceEAImpl) DeleteArgoAppWithK8sClient(ctx context.Context, clusterId int, namespace, appName string, cascadeDelete bool) error {
+	return nil
+}
+
+func (impl *ArgoClientWrapperServiceEAImpl) SyncArgoCDApplicationIfNeededAndRefresh(ctx context.Context, argoAppName, targetRevision string) error {
 	impl.logger.Info("not implemented")
 	return nil
 }
@@ -68,7 +72,7 @@ func (impl *ArgoClientWrapperServiceEAImpl) UpdateArgoCDSyncModeIfNeeded(ctx con
 	return nil
 }
 
-func (impl *ArgoClientWrapperServiceEAImpl) RegisterGitOpsRepoInArgoWithRetry(ctx context.Context, gitOpsRepoUrl string, userId int32) error {
+func (impl *ArgoClientWrapperServiceEAImpl) RegisterGitOpsRepoInArgoWithRetry(ctx context.Context, gitOpsRepoUrl, targetRevision string, userId int32) error {
 	impl.logger.Info("not implemented")
 	return nil
 }
@@ -123,7 +127,12 @@ func (impl *ArgoClientWrapperServiceEAImpl) GetArgoAppByName(ctx context.Context
 	return nil, nil
 }
 
-func (impl *ArgoClientWrapperServiceEAImpl) IsArgoAppPatchRequired(argoAppSpec *v1alpha1.ApplicationSource, currentGitRepoUrl, currentChartPath string) bool {
+func (impl *ArgoClientWrapperServiceEAImpl) GetArgoAppByNameWithK8sClient(ctx context.Context, clusterId int, namespace, appName string) (*v1alpha1.Application, error) {
+	impl.logger.Info("not implemented for EA mode")
+	return nil, nil
+}
+
+func (impl *ArgoClientWrapperServiceEAImpl) IsArgoAppPatchRequired(argoAppSpec *v1alpha1.ApplicationSource, currentGitRepoUrl, currentTargetRevision, currentChartPath string) bool {
 	impl.logger.Info("not implemented for EA mode")
 	return false
 }
diff --git a/client/argocdServer/bean/bean.go b/client/argocdServer/bean/bean.go
index 78b07d8aad..168ab6694a 100644
--- a/client/argocdServer/bean/bean.go
+++ b/client/argocdServer/bean/bean.go
@@ -24,9 +24,11 @@ import (
 )
 
 const (
-	RefreshTypeNormal    = "normal"
-	TargetRevisionMaster = "master"
-	PatchTypeMerge       = "merge"
+	RefreshTypeNormal          = "normal"
+	TargetRevisionMaster       = "master"
+	TargetRevisionOriginMaster = "origin/master"
+	PatchTypeMerge             = "merge"
+	TargetRevisionHead         = "head"
 )
 
 type ArgoCdAppPatchReqDto struct {
@@ -66,12 +68,11 @@ type Result struct {
 type ResourceTreeResponse struct {
 	*v1alpha1.ApplicationTree
-	NewGenerationReplicaSets []string                        `json:"newGenerationReplicaSets"`
-	Status                   string                          `json:"status"`
-	RevisionHash             string                          `json:"revisionHash"`
-	PodMetadata              []*PodMetadata                  `json:"podMetadata"`
-	Conditions               []v1alpha1.ApplicationCondition `json:"conditions"`
-	ResourcesSyncResultMap   map[string]string               `json:"resourcesSyncResult"`
+	Status                 string                          `json:"status"`
+	RevisionHash           string                          `json:"revisionHash"`
+	PodMetadata            []*PodMetadata                  `json:"podMetadata"`
+	Conditions             []v1alpha1.ApplicationCondition `json:"conditions"`
+	ResourcesSyncResultMap map[string]string               `json:"resourcesSyncResult"`
 }
 
 type PodMetadata struct {
diff --git a/client/argocdServer/config/Config.go b/client/argocdServer/config/Config.go
index 55cb131621..20b622c38e 100644
--- a/client/argocdServer/config/Config.go
+++ b/client/argocdServer/config/Config.go
@@ -29,6 +29,7 @@ import (
 type ArgoCDConfigGetter interface {
 	GetGRPCConfig() (*bean.ArgoGRPCConfig, error)
 	GetK8sConfig() (*bean.ArgoK8sConfig, error)
+	GetK8sConfigWithClusterIdAndNamespace(clusterId int, namespace string) (*bean.ArgoK8sConfig, error)
 }
 
 type ArgoCDConfigGetterImpl struct {
@@ -88,3 +89,22 @@ func (impl *ArgoCDConfigGetterImpl) GetK8sConfig() (*bean.ArgoK8sConfig, error)
 	}
 	return k8sConfig, nil
 }
+
+func (impl *ArgoCDConfigGetterImpl) GetK8sConfigWithClusterIdAndNamespace(clusterId int, namespace string) (*bean.ArgoK8sConfig, error) {
+	clusterBean, err := impl.clusterReadService.FindById(clusterId)
+	if err != nil {
+		impl.logger.Errorw("error in fetching cluster bean from db", "err", err)
+		return nil, err
+	}
+	cfg := clusterBean.GetClusterConfig()
+	restConfig, err := impl.K8sService.GetRestConfigByCluster(cfg)
+	if err != nil {
+		impl.logger.Errorw("error in getting k8s config", "err", err)
+		return nil, err
+	}
+	k8sConfig := &bean.ArgoK8sConfig{
+		RestConfig:   restConfig,
+		AcdNamespace: namespace,
+	}
+	return k8sConfig, nil
+}
diff --git a/client/argocdServer/connection/Connection.go b/client/argocdServer/connection/Connection.go
index a3779763d5..7e690864d5 100644
--- a/client/argocdServer/connection/Connection.go
+++ b/client/argocdServer/connection/Connection.go
@@ -29,6 +29,7 @@ import (
 	bean2 "github.com/devtron-labs/devtron/pkg/cluster/bean"
 	"github.com/devtron-labs/devtron/pkg/deployment/gitOps/config"
 	k8s2 "github.com/devtron-labs/devtron/pkg/k8s"
+	moduleBean "github.com/devtron-labs/devtron/pkg/module/bean"
 	moduleRepo "github.com/devtron-labs/devtron/pkg/module/repo"
 	util2 "github.com/devtron-labs/devtron/util"
 	"github.com/go-pg/pg"
@@ -67,6 +68,7 @@ type ArgoCDConnectionManager interface {
 	GetGrpcClientConnection(grpcConfig *bean.ArgoGRPCConfig) *grpc.ClientConn
 	GetOrUpdateArgoCdUserDetail(grpcConfig *bean.ArgoGRPCConfig) string
 }
+
 type ArgoCDConnectionManagerImpl struct {
 	logger          *zap.SugaredLogger
 	settingsManager *settings.SettingsManager
@@ -114,14 +116,9 @@ func NewArgoCDConnectionManagerImpl(Logger *zap.SugaredLogger,
 	return argoUserServiceImpl, nil
 }
 
-const (
-	ModuleNameArgoCd      string = "argo-cd"
-	ModuleStatusInstalled string = "installed"
-)
-
 func (impl *ArgoCDConnectionManagerImpl) ValidateGitOpsAndGetOrUpdateArgoCdUserDetail(grpcConfig *bean.ArgoGRPCConfig) string {
 	gitOpsConfigurationStatus, err := impl.gitOpsConfigReadService.IsGitOpsConfigured()
-	if err != nil || !gitOpsConfigurationStatus.IsGitOpsConfigured {
+	if err != nil || !gitOpsConfigurationStatus.IsGitOpsConfiguredAndArgoCdInstalled() {
 		return ""
 	}
 	_ = impl.GetOrUpdateArgoCdUserDetail(grpcConfig)
@@ -494,12 +491,12 @@ func getK8sClient() (k8sClient *kubernetes.Clientset, k8sConfig clientcmd.Client
 func (impl *ArgoCDConnectionManagerImpl) getArgoCdSettings() *settings.ArgoCDSettings {
 	settings := impl.argoCDSettings
 	if settings == nil {
-		module, err := impl.moduleRepository.FindOne(ModuleNameArgoCd)
+		module, err := impl.moduleRepository.FindOne(moduleBean.ModuleNameArgoCd)
 		if err != nil && err != pg.ErrNoRows {
 			impl.logger.Errorw("error on get acd connection", "err", err)
 			return nil
 		}
-		if module == nil || module.Status != ModuleStatusInstalled {
+		if module == nil || module.Status != moduleBean.ModuleStatusInstalled {
 			impl.logger.Errorw("error on get acd connection", "err", err)
 			return nil
 		}
diff --git a/client/argocdServer/helper.go b/client/argocdServer/helper.go
index 1d6eba751c..76ddb830c4 100644
--- a/client/argocdServer/helper.go
+++ b/client/argocdServer/helper.go
@@ -1,6 +1,8 @@
 package argocdServer
 
 import (
+	json2 "encoding/json"
+	errors3 "errors"
 	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -36,3 +38,21 @@ func isArgoAppSyncModeMigrationNeeded(argoApplication *v1alpha1.Application, acd
 	}
 	return false
 }
+
+// GetAppObject converts a raw application map (as returned by the k8s client)
+// into a typed v1alpha1.Application via a JSON round trip.
+func GetAppObject(appMapObj map[string]interface{}) (*v1alpha1.Application, error) {
+	if appMapObj == nil {
+		return nil, errors3.New("found empty application object")
+	}
+	appJson, err := json2.Marshal(appMapObj)
+	if err != nil {
+		return nil, err
+	}
+	var app v1alpha1.Application
+	err = json2.Unmarshal(appJson, &app)
+	if err != nil {
+		return nil, err
+	}
+	return &app, nil
+}
diff --git a/client/argocdServer/k8sClient.go b/client/argocdServer/k8sClient.go
index a8a4d615c9..6471296fcb 100644
--- a/client/argocdServer/k8sClient.go
+++ b/client/argocdServer/k8sClient.go
@@ -22,15 +22,17 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
 	"github.com/devtron-labs/common-lib/utils/k8s"
+	"github.com/devtron-labs/devtron/client/argocdServer/bean"
 	"github.com/devtron-labs/devtron/internal/util"
-	"github.com/devtron-labs/devtron/pkg/cluster/repository"
 	"go.uber.org/zap"
 	"io/ioutil"
 	k8sError "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/rest"
 	"path/filepath"
 	"text/template"
@@ -56,7 +58,8 @@ const (
 type ArgoK8sClient interface {
 	CreateAcdApp(ctx context.Context, appRequest *AppTemplate, applicationTemplatePath string) (string, error)
-	GetArgoApplication(namespace string, appName string, cluster *repository.Cluster) (map[string]interface{}, error)
+	GetArgoApplication(k8sConfig *bean.ArgoK8sConfig, appName string) (map[string]interface{}, error)
+	DeleteArgoApplication(ctx context.Context, k8sConfig *bean.ArgoK8sConfig, appName string, cascadeDelete bool) error
 }
 type ArgoK8sClientImpl struct {
 	logger *zap.SugaredLogger
@@ -137,12 +140,25 @@ func (impl ArgoK8sClientImpl) CreateArgoApplication(ctx context.Context, namespa
 	return err
 }
 
+func (impl ArgoK8sClientImpl) handleArgoAppGetError(res []byte, err error) error {
+	// default error set
+	apiError := &util.ApiError{
+		InternalMessage: "error getting argo cd application",
+		UserMessage:     "error getting argo cd application",
+	}
+	return impl.convertArgoK8sClientError(apiError, res, err)
+}
+
 func (impl ArgoK8sClientImpl) handleArgoAppCreationError(res []byte, err error) error {
 	// default error set
 	apiError := &util.ApiError{
 		InternalMessage: "error creating argo cd app",
 		UserMessage:     "error creating argo cd app",
 	}
+	return impl.convertArgoK8sClientError(apiError, res, err)
+}
+
+func (impl ArgoK8sClientImpl) convertArgoK8sClientError(apiError *util.ApiError, res []byte, err error) error {
 	// error override for errors.StatusError
 	if statusError := (&k8sError.StatusError{}); errors.As(err, &statusError) {
 		apiError.HttpStatusCode = int(statusError.Status().Code)
@@ -171,13 +187,9 @@ func (impl ArgoK8sClientImpl) handleArgoAppCreationError(res []byte, err error)
 	return apiError
 }
 
-func (impl ArgoK8sClientImpl) GetArgoApplication(namespace string, appName string, cluster *repository.Cluster) (map[string]interface{}, error) {
+func (impl ArgoK8sClientImpl) GetArgoApplication(k8sConfig *bean.ArgoK8sConfig, appName string) (map[string]interface{}, error) {
-	config, err := rest.InClusterConfig()
-	if err != nil {
-		impl.logger.Errorw("error in cluster config", "err", err)
-		return nil, err
-	}
+	config := k8sConfig.RestConfig
 	config.GroupVersion = &schema.GroupVersion{Group: "argoproj.io", Version: "v1alpha1"}
 	config.NegotiatedSerializer = serializer.NewCodecFactory(runtime.NewScheme())
 	config.APIPath = "/apis"
@@ -191,19 +203,50 @@ func (impl ArgoK8sClientImpl) GetArgoApplication(namespace string, appName strin
 	//opts := metav1.GetOptions{}
 	res, err := client.
 		Get().
-		Namespace(namespace).
+		Namespace(k8sConfig.AcdNamespace).
 		Resource("applications").
 		Name(appName).
 		//VersionedParams(&opts, metav1.ParameterCodec).
 		Do(context.Background()).Raw()
 	response := make(map[string]interface{})
 	if err != nil {
-		err := json.Unmarshal(res, &response)
-		if err != nil {
-			impl.logger.Errorw("unmarshal error on app update status", "err", err)
-			return nil, fmt.Errorf("error get argo cd app")
-		}
+		impl.logger.Errorw("error in get argo application", "err", err)
+		return nil, impl.handleArgoAppGetError(res, err)
+	}
+	err = json.Unmarshal(res, &response)
+	if err != nil {
+		impl.logger.Errorw("unmarshal error on app update status", "err", err)
+		return nil, fmt.Errorf("error get argo cd app")
 	}
 	impl.logger.Infow("get argo cd application", "res", response, "err", err)
 	return response, err
 }
+
+func (impl ArgoK8sClientImpl) DeleteArgoApplication(ctx context.Context, k8sConfig *bean.ArgoK8sConfig, appName string, cascadeDelete bool) error {
+	patchType := types.MergePatchType
+	patchJSON := ""
+
+	// TODO: ayush test cascade delete
+	if cascadeDelete {
+		// setting the finalizer makes ArgoCD cascade-delete the app's resources
+		patchJSON = `{"metadata": {"finalizers": ["resources-finalizer.argocd.argoproj.io"]}}`
+	} else {
+		patchJSON = `{"metadata": {"finalizers": null}}`
+	}
+
+	applicationGVK := v1alpha1.ApplicationSchemaGroupVersionKind
+
+	_, err := impl.k8sUtil.PatchResourceRequest(ctx, k8sConfig.RestConfig, patchType, patchJSON, appName, k8sConfig.AcdNamespace, applicationGVK)
+	if err != nil {
+		impl.logger.Errorw("error in patching argo application", "err", err)
+		return err
+	}
+
+	_, err = impl.k8sUtil.DeleteResource(ctx, k8sConfig.RestConfig, applicationGVK, k8sConfig.AcdNamespace, appName, true)
+	if err != nil {
+		impl.logger.Errorw("error in deleting argo application", "acdAppName", appName, "err", err)
+		return err
+	}
+
+	return nil
+}
diff --git a/client/events/EventClient.go b/client/events/EventClient.go
index 98a26aa298..a58cadea82 100644
--- a/client/events/EventClient.go
+++ b/client/events/EventClient.go
@@ -23,6 +23,7 @@ import (
 	"fmt"
 	bean2 "github.com/devtron-labs/devtron/pkg/attributes/bean"
 	"github.com/devtron-labs/devtron/pkg/module"
+	bean3 "github.com/devtron-labs/devtron/pkg/module/bean"
 	"net/http"
 	"time"
@@ -165,12 +166,12 @@ func (impl *EventRESTClientImpl) buildFinalPayload(event Event, cdPipeline *pipe
 func (impl *EventRESTClientImpl) WriteNotificationEvent(event Event) (bool, error) {
 	// if notification integration is not installed then do not send the notification
-	moduleInfo, err := impl.moduleService.GetModuleInfo(module.ModuleNameNotification)
+	moduleInfo, err := impl.moduleService.GetModuleInfo(bean3.ModuleNameNotification)
 	if err != nil {
 		impl.logger.Errorw("error while getting notification module status", "err", err)
 		return false, err
 	}
-	if moduleInfo.Status != module.ModuleStatusInstalled {
+	if moduleInfo.Status != bean3.ModuleStatusInstalled {
 		impl.logger.Warnw("Notification module is not installed, hence skipping sending notification", "currentModuleStatus", moduleInfo.Status)
 		return false, nil
 	}
diff --git a/client/telemetry/TelemetryEventClient.go b/client/telemetry/TelemetryEventClient.go
index dd7dcb2e61..3be2cfa185 100644
--- a/client/telemetry/TelemetryEventClient.go
+++ b/client/telemetry/TelemetryEventClient.go
@@ -27,6 +27,7 @@ import (
 	installedAppReader "github.com/devtron-labs/devtron/pkg/appStore/installedApp/read"
 	bean2 "github.com/devtron-labs/devtron/pkg/attributes/bean"
 	bean3 "github.com/devtron-labs/devtron/pkg/cluster/bean"
+	module2 "github.com/devtron-labs/devtron/pkg/module/bean"
 	cron3 "github.com/devtron-labs/devtron/util/cron"
 	"net/http"
 	"time"
@@ -37,7 +38,6 @@ import (
 	"github.com/devtron-labs/devtron/pkg/auth/sso"
 	user2 "github.com/devtron-labs/devtron/pkg/auth/user"
 	"github.com/devtron-labs/devtron/pkg/cluster"
-	module2 "github.com/devtron-labs/devtron/pkg/module"
 	moduleRepo "github.com/devtron-labs/devtron/pkg/module/repo"
 	serverDataStore "github.com/devtron-labs/devtron/pkg/server/store"
 	util3 "github.com/devtron-labs/devtron/pkg/util"
diff --git a/cmd/external-app/router.go b/cmd/external-app/router.go
index 972695fb36..d7047fcaf4 100644
--- a/cmd/external-app/router.go
+++ b/cmd/external-app/router.go
@@ -56,6 +56,7 @@ type MuxRouter struct {
 	teamRouter        team.TeamRouter
 	UserAuthRouter    user.UserAuthRouter
 	userRouter        user.UserRouter
+	commonRouter      router.CommonRouter
 	clusterRouter     cluster.ClusterRouter
 	dashboardRouter   dashboard.DashboardRouter
 	helmAppRouter     client.HelmAppRouter
@@ -92,6 +93,7 @@ func NewMuxRouter(
 	teamRouter team.TeamRouter,
 	UserAuthRouter user.UserAuthRouter,
 	userRouter user.UserRouter,
+	commonRouter router.CommonRouter,
 	clusterRouter cluster.ClusterRouter,
 	dashboardRouter dashboard.DashboardRouter,
 	helmAppRouter client.HelmAppRouter,
@@ -124,6 +126,7 @@ func NewMuxRouter(
 		teamRouter:        teamRouter,
 		UserAuthRouter:    UserAuthRouter,
 		userRouter:        userRouter,
+		commonRouter:      commonRouter,
 		clusterRouter:     clusterRouter,
 		dashboardRouter:   dashboardRouter,
 		helmAppRouter:     helmAppRouter,
@@ -288,4 +291,7 @@ func (r *MuxRouter) Init() {
 	r.argoApplicationRouter.InitArgoApplicationRouter(argoApplicationRouter)
 	fluxApplicationRouter := r.Router.PathPrefix("/orchestrator/flux-application").Subrouter()
 	r.fluxApplicationRouter.InitFluxApplicationRouter(fluxApplicationRouter)
+
+	commonRouter := r.Router.PathPrefix("/orchestrator/global").Subrouter()
+	r.commonRouter.InitCommonRouter(commonRouter)
 }
diff --git a/cmd/external-app/wire.go b/cmd/external-app/wire.go
index c95eab1760..3130682556 100644 --- a/cmd/external-app/wire.go +++ b/cmd/external-app/wire.go @@ -62,7 +62,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository" app2 "github.com/devtron-labs/devtron/internal/sql/repository/app" "github.com/devtron-labs/devtron/internal/sql/repository/appStatus" - "github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig" + "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" @@ -73,9 +73,11 @@ import ( "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/FullMode/deployment" "github.com/devtron-labs/devtron/pkg/attributes" "github.com/devtron-labs/devtron/pkg/build/git/gitMaterial" + "github.com/devtron-labs/devtron/pkg/commonService" delete2 "github.com/devtron-labs/devtron/pkg/delete" "github.com/devtron-labs/devtron/pkg/deployment/common" "github.com/devtron-labs/devtron/pkg/deployment/gitOps" + "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/read" "github.com/devtron-labs/devtron/pkg/deployment/providerConfig" "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs" repository2 "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs/repository" @@ -245,14 +247,19 @@ func InitializeApp() (*App, error) { // chart group repository layer wire injection ended // end: docker registry wire set injection + + router.NewCommonRouterImpl, + wire.Bind(new(router.CommonRouter), new(*router.CommonRouterImpl)), + restHandler.NewCommonRestHandlerImpl, + wire.Bind(new(restHandler.CommonRestHandler), new(*restHandler.CommonRestHandlerImpl)), + + commonService.NewCommonBaseServiceImpl, + wire.Bind(new(commonService.CommonService), new(*commonService.CommonBaseServiceImpl)), + cron.NewCronLoggerImpl, appStore.EAModeWireSet, - deploymentConfig.NewRepositoryImpl, - wire.Bind(new(deploymentConfig.Repository), new(*deploymentConfig.RepositoryImpl)), - - common.NewDeploymentConfigServiceImpl, - wire.Bind(new(common.DeploymentConfigService), new(*common.DeploymentConfigServiceImpl)), + common.WireSet, wire.Bind(new(util4.K8sService), new(*util4.K8sServiceImpl)), @@ -269,6 +276,12 @@ func InitializeApp() (*App, error) { dbMigration.NewDbMigrationServiceImpl, wire.Bind(new(dbMigration.DbMigration), new(*dbMigration.DbMigrationServiceImpl)), + + read.NewEnvConfigOverrideReadServiceImpl, + wire.Bind(new(read.EnvConfigOverrideService), new(*read.EnvConfigOverrideReadServiceImpl)), + + chartConfig.NewEnvConfigOverrideRepository, + wire.Bind(new(chartConfig.EnvConfigOverrideRepository), new(*chartConfig.EnvConfigOverrideRepositoryImpl)), ) return &App{}, nil } diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index 27e1091df7..0b8bc14913 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -54,6 +54,7 @@ import ( repository5 "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/app" 
"github.com/devtron-labs/devtron/internal/sql/repository/appStatus" + "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" "github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig" repository7 "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" @@ -74,7 +75,7 @@ import ( "github.com/devtron-labs/devtron/pkg/appStore/values/repository" service4 "github.com/devtron-labs/devtron/pkg/appStore/values/service" "github.com/devtron-labs/devtron/pkg/argoApplication" - read6 "github.com/devtron-labs/devtron/pkg/argoApplication/read" + read9 "github.com/devtron-labs/devtron/pkg/argoApplication/read" config3 "github.com/devtron-labs/devtron/pkg/argoApplication/read/config" "github.com/devtron-labs/devtron/pkg/attributes" "github.com/devtron-labs/devtron/pkg/auth/authentication" @@ -82,21 +83,24 @@ import ( "github.com/devtron-labs/devtron/pkg/auth/sso" "github.com/devtron-labs/devtron/pkg/auth/user" "github.com/devtron-labs/devtron/pkg/auth/user/repository" - read7 "github.com/devtron-labs/devtron/pkg/build/git/gitMaterial/read" + read10 "github.com/devtron-labs/devtron/pkg/build/git/gitMaterial/read" repository12 "github.com/devtron-labs/devtron/pkg/build/git/gitMaterial/repository" "github.com/devtron-labs/devtron/pkg/chartRepo" "github.com/devtron-labs/devtron/pkg/chartRepo/repository" "github.com/devtron-labs/devtron/pkg/cluster" "github.com/devtron-labs/devtron/pkg/cluster/environment" - read5 "github.com/devtron-labs/devtron/pkg/cluster/environment/read" + read8 "github.com/devtron-labs/devtron/pkg/cluster/environment/read" repository4 "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" rbac2 "github.com/devtron-labs/devtron/pkg/cluster/rbac" read2 "github.com/devtron-labs/devtron/pkg/cluster/read" repository3 "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/clusterTerminalAccess" + "github.com/devtron-labs/devtron/pkg/commonService" delete2 "github.com/devtron-labs/devtron/pkg/delete" "github.com/devtron-labs/devtron/pkg/deployment/common" + read7 "github.com/devtron-labs/devtron/pkg/deployment/common/read" config2 "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" + read6 "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/read" "github.com/devtron-labs/devtron/pkg/deployment/providerConfig" "github.com/devtron-labs/devtron/pkg/externalLink" "github.com/devtron-labs/devtron/pkg/fluxApplication" @@ -109,6 +113,8 @@ import ( "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs" repository10 "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs/repository" "github.com/devtron-labs/devtron/pkg/module" + bean2 
"github.com/devtron-labs/devtron/pkg/module/bean" + read5 "github.com/devtron-labs/devtron/pkg/module/read" "github.com/devtron-labs/devtron/pkg/module/repo" "github.com/devtron-labs/devtron/pkg/module/store" "github.com/devtron-labs/devtron/pkg/pipeline" @@ -287,6 +293,11 @@ func InitializeApp() (*App, error) { roleGroupServiceImpl := user.NewRoleGroupServiceImpl(userAuthRepositoryImpl, sugaredLogger, userRepositoryImpl, roleGroupRepositoryImpl, userCommonServiceImpl) userRestHandlerImpl := user2.NewUserRestHandlerImpl(userServiceImpl, validate, sugaredLogger, enforcerImpl, roleGroupServiceImpl, userCommonServiceImpl) userRouterImpl := user2.NewUserRouterImpl(userRestHandlerImpl) + moduleRepositoryImpl := moduleRepo.NewModuleRepositoryImpl(db) + moduleReadServiceImpl := read5.NewModuleReadServiceImpl(sugaredLogger, moduleRepositoryImpl) + commonBaseServiceImpl := commonService.NewCommonBaseServiceImpl(sugaredLogger, environmentVariables, moduleReadServiceImpl) + commonRestHandlerImpl := restHandler.NewCommonRestHandlerImpl(sugaredLogger, userServiceImpl, commonBaseServiceImpl) + commonRouterImpl := router.NewCommonRouterImpl(commonRestHandlerImpl) transactionUtilImpl := sql.NewTransactionUtilImpl(db) genericNoteRepositoryImpl := repository8.NewGenericNoteRepositoryImpl(db, transactionUtilImpl) genericNoteHistoryRepositoryImpl := repository8.NewGenericNoteHistoryRepositoryImpl(db, transactionUtilImpl) @@ -310,10 +321,14 @@ func InitializeApp() (*App, error) { installedAppVersionHistoryRepositoryImpl := repository6.NewInstalledAppVersionHistoryRepositoryImpl(sugaredLogger, db) repositoryImpl := deploymentConfig.NewRepositoryImpl(db) chartRepositoryImpl := chartRepoRepository.NewChartRepository(db, transactionUtilImpl) - deploymentConfigServiceImpl := common.NewDeploymentConfigServiceImpl(repositoryImpl, sugaredLogger, chartRepositoryImpl, pipelineRepositoryImpl, appRepositoryImpl, installedAppReadServiceEAImpl, environmentVariables) + envConfigOverrideRepositoryImpl := chartConfig.NewEnvConfigOverrideRepository(db) + envConfigOverrideReadServiceImpl := read6.NewEnvConfigOverrideReadServiceImpl(envConfigOverrideRepositoryImpl, sugaredLogger) + chartRefRepositoryImpl := chartRepoRepository.NewChartRefRepositoryImpl(db) + deploymentConfigReadServiceImpl := read7.NewDeploymentConfigReadServiceImpl(sugaredLogger, repositoryImpl, environmentVariables, chartRepositoryImpl, pipelineRepositoryImpl, appRepositoryImpl, environmentRepositoryImpl, envConfigOverrideReadServiceImpl) + deploymentConfigServiceImpl := common.NewDeploymentConfigServiceImpl(repositoryImpl, sugaredLogger, chartRepositoryImpl, pipelineRepositoryImpl, appRepositoryImpl, installedAppReadServiceEAImpl, environmentVariables, envConfigOverrideReadServiceImpl, environmentRepositoryImpl, chartRefRepositoryImpl, deploymentConfigReadServiceImpl, acdAuthConfig) installedAppDBServiceImpl := EAMode.NewInstalledAppDBServiceImpl(sugaredLogger, installedAppRepositoryImpl, appRepositoryImpl, userServiceImpl, environmentServiceImpl, installedAppVersionHistoryRepositoryImpl, deploymentConfigServiceImpl) gitOpsConfigRepositoryImpl := repository5.NewGitOpsConfigRepositoryImpl(sugaredLogger, db) - gitOpsConfigReadServiceImpl := config2.NewGitOpsConfigReadServiceImpl(sugaredLogger, gitOpsConfigRepositoryImpl, userServiceImpl, environmentVariables) + gitOpsConfigReadServiceImpl := config2.NewGitOpsConfigReadServiceImpl(sugaredLogger, gitOpsConfigRepositoryImpl, 
userServiceImpl, environmentVariables, moduleReadServiceImpl) attributesServiceImpl := attributes.NewAttributesServiceImpl(sugaredLogger, attributesRepositoryImpl) deploymentTypeOverrideServiceImpl := providerConfig.NewDeploymentTypeOverrideServiceImpl(sugaredLogger, environmentVariables, attributesServiceImpl) chartTemplateServiceImpl := util.NewChartTemplateServiceImpl(sugaredLogger) @@ -340,13 +355,13 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - argoApplicationServiceImpl := argoApplication.NewArgoApplicationServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, helmAppClientImpl, helmAppServiceImpl, k8sApplicationServiceImpl, argoApplicationConfigServiceImpl) + argoApplicationServiceImpl := argoApplication.NewArgoApplicationServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, helmAppClientImpl, helmAppServiceImpl, k8sApplicationServiceImpl, argoApplicationConfigServiceImpl, deploymentConfigServiceImpl) helmAppRestHandlerImpl := client2.NewHelmAppRestHandlerImpl(sugaredLogger, helmAppServiceImpl, enforcerImpl, clusterServiceImpl, enforcerUtilHelmImpl, appStoreDeploymentServiceImpl, installedAppDBServiceImpl, userServiceImpl, attributesServiceImpl, serverEnvConfigServerEnvConfig, fluxApplicationServiceImpl, argoApplicationServiceImpl) helmAppRouterImpl := client2.NewHelmAppRouterImpl(helmAppRestHandlerImpl) - environmentReadServiceImpl := read5.NewEnvironmentReadServiceImpl(sugaredLogger, environmentRepositoryImpl) + environmentReadServiceImpl := read8.NewEnvironmentReadServiceImpl(sugaredLogger, environmentRepositoryImpl) environmentRestHandlerImpl := cluster2.NewEnvironmentRestHandlerImpl(environmentServiceImpl, environmentReadServiceImpl, sugaredLogger, userServiceImpl, validate, enforcerImpl, deleteServiceImpl, k8sServiceImpl, k8sCommonServiceImpl) environmentRouterImpl := cluster2.NewEnvironmentRouterImpl(environmentRestHandlerImpl) - argoApplicationReadServiceImpl := read6.NewArgoApplicationReadServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, helmAppClientImpl, helmAppServiceImpl) + argoApplicationReadServiceImpl := read9.NewArgoApplicationReadServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, helmAppClientImpl, helmAppServiceImpl) k8sApplicationRestHandlerImpl := application2.NewK8sApplicationRestHandlerImpl(sugaredLogger, k8sApplicationServiceImpl, pumpImpl, terminalSessionHandlerImpl, enforcerImpl, enforcerUtilHelmImpl, enforcerUtilImpl, helmAppServiceImpl, userServiceImpl, k8sCommonServiceImpl, validate, environmentVariables, fluxApplicationServiceImpl, argoApplicationReadServiceImpl) k8sApplicationRouterImpl := application2.NewK8sApplicationRouterImpl(k8sApplicationRestHandlerImpl) chartRepositoryRestHandlerImpl := chartRepo2.NewChartRepositoryRestHandlerImpl(sugaredLogger, userServiceImpl, chartRepositoryServiceImpl, enforcerImpl, validate, deleteServiceImpl, attributesServiceImpl) @@ -369,7 +384,6 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - moduleRepositoryImpl := moduleRepo.NewModuleRepositoryImpl(db) providerIdentifierServiceImpl := providerIdentifier.NewProviderIdentifierServiceImpl(sugaredLogger) telemetryEventClientImpl, err := telemetry.NewTelemetryEventClientImpl(sugaredLogger, httpClient, clusterServiceImpl, k8sServiceImpl, acdAuthConfig, userServiceImpl, attributesRepositoryImpl, ssoLoginServiceImpl, posthogClient, moduleRepositoryImpl, serverDataStoreServerDataStore, userAuditServiceImpl, helmAppClientImpl, providerIdentifierServiceImpl, 
cronLoggerImpl, installedAppReadServiceEAImpl) if err != nil { @@ -390,7 +404,7 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - moduleEnvConfig, err := module.ParseModuleEnvConfig() + moduleEnvConfig, err := bean2.ParseModuleEnvConfig() if err != nil { return nil, err } @@ -453,7 +467,7 @@ func InitializeApp() (*App, error) { return nil, err } materialRepositoryImpl := repository12.NewMaterialRepositoryImpl(db) - gitMaterialReadServiceImpl := read7.NewGitMaterialReadServiceImpl(sugaredLogger, materialRepositoryImpl) + gitMaterialReadServiceImpl := read10.NewGitMaterialReadServiceImpl(sugaredLogger, materialRepositoryImpl) appCrudOperationServiceImpl := app2.NewAppCrudOperationServiceImpl(appLabelRepositoryImpl, sugaredLogger, appRepositoryImpl, userRepositoryImpl, installedAppRepositoryImpl, genericNoteServiceImpl, installedAppDBServiceImpl, crudOperationServiceConfig, dbMigrationServiceImpl, gitMaterialReadServiceImpl) appInfoRestHandlerImpl := appInfo.NewAppInfoRestHandlerImpl(sugaredLogger, appCrudOperationServiceImpl, userServiceImpl, validate, enforcerUtilImpl, enforcerImpl, helmAppServiceImpl, enforcerUtilHelmImpl, genericNoteServiceImpl) appInfoRouterImpl := appInfo2.NewAppInfoRouterImpl(sugaredLogger, appInfoRestHandlerImpl) @@ -467,7 +481,7 @@ func InitializeApp() (*App, error) { argoApplicationRouterImpl := argoApplication2.NewArgoApplicationRouterImpl(argoApplicationRestHandlerImpl) fluxApplicationRestHandlerImpl := fluxApplication2.NewFluxApplicationRestHandlerImpl(fluxApplicationServiceImpl, sugaredLogger, enforcerImpl) fluxApplicationRouterImpl := fluxApplication2.NewFluxApplicationRouterImpl(fluxApplicationRestHandlerImpl) - muxRouter := NewMuxRouter(sugaredLogger, ssoLoginRouterImpl, teamRouterImpl, userAuthRouterImpl, userRouterImpl, clusterRouterImpl, dashboardRouterImpl, helmAppRouterImpl, environmentRouterImpl, k8sApplicationRouterImpl, chartRepositoryRouterImpl, appStoreDiscoverRouterImpl, appStoreValuesRouterImpl, appStoreDeploymentRouterImpl, chartProviderRouterImpl, dockerRegRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, userAttributesRouterImpl, telemetryRouterImpl, userTerminalAccessRouterImpl, attributesRouterImpl, appRouterEAModeImpl, rbacRoleRouterImpl, argoApplicationRouterImpl, fluxApplicationRouterImpl) + muxRouter := NewMuxRouter(sugaredLogger, ssoLoginRouterImpl, teamRouterImpl, userAuthRouterImpl, userRouterImpl, commonRouterImpl, clusterRouterImpl, dashboardRouterImpl, helmAppRouterImpl, environmentRouterImpl, k8sApplicationRouterImpl, chartRepositoryRouterImpl, appStoreDiscoverRouterImpl, appStoreValuesRouterImpl, appStoreDeploymentRouterImpl, chartProviderRouterImpl, dockerRegRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, userAttributesRouterImpl, telemetryRouterImpl, userTerminalAccessRouterImpl, attributesRouterImpl, appRouterEAModeImpl, rbacRoleRouterImpl, argoApplicationRouterImpl, fluxApplicationRouterImpl) mainApp := NewApp(db, sessionManager, muxRouter, telemetryEventClientImpl, posthogClient, sugaredLogger) return mainApp, nil } diff --git a/env_gen.json b/env_gen.json index adec9c458c..6ca53e47c4 100644 --- a/env_gen.json +++ b/env_gen.json @@ -1 +1 @@ 
-[{"Category":"CD","Fields":[{"Env":"ARGO_APP_MANUAL_SYNC_TIME","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_HELM_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_PIPELINE_STATUS_TIMEOUT_DURATION","EnvType":"string","EnvValue":"20","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEPLOY_STATUS_CRON_GET_PIPELINE_DEPLOYED_WITHIN_HOURS","EnvType":"int","EnvValue":"12","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_CHART_ARGO_CD_INSTALL_REQUEST_TIMEOUT","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_CHART_INSTALL_REQUEST_TIMEOUT","EnvType":"int","EnvValue":"6","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXPOSE_CD_METRICS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"HELM_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME","EnvType":"string","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PIPELINE_DEGRADED_TIME","EnvType":"string","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_DEVTRON_APP","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_EXTERNAL_HELM_APP","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_HELM_APP","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"CI_RUNNER","Fields":[{"Env":"AZURE_ACCOUNT_KEY","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"AZURE_ACCOUNT_NAME","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"AZURE_BLOB_CONTAINER_CI_CACHE","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"AZURE_BLOB_CONTAINER_CI_LOG","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"AZURE_GATEWAY_CONNECTION_INSECURE","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"AZURE_GATEWAY_URL","EnvType":"string","EnvValue":"http://devtron-minio.devtroncd:9000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BASE_LOG_LOCATION_PATH","EnvType":"string","EnvValue":"/home/devtron/","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_GCP_CREDENTIALS_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_PROVIDER","EnvType":"","EnvValue":"S3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ACCESS_KEY","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_BUCKET_VERSIONED","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ENDPOINT","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ENDPOINT_INSECURE","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_SECRET_KEY","EnvType":"string","EnvValue":"","EnvDescription":"","Example":
"","Deprecated":"false"},{"Env":"BUILDX_CACHE_PATH","EnvType":"string","EnvValue":"/var/lib/devtron/buildx","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BUILDX_K8S_DRIVER_OPTIONS","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BUILDX_PROVENANCE_MODE","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BUILD_LOG_TTL_VALUE_IN_SECS","EnvType":"int","EnvValue":"3600","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CACHE_LIMIT","EnvType":"int64","EnvValue":"5000000000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_DEFAULT_ADDRESS_POOL_BASE_CIDR","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_DEFAULT_ADDRESS_POOL_SIZE","EnvType":"int","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_LIMIT_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_LIMIT_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"dedicated","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"ci","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_REQ_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_REQ_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_WORKFLOW_EXECUTOR_TYPE","EnvType":"","EnvValue":"AWF","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_WORKFLOW_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"cd-runner","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_DEFAULT_ADDRESS_POOL_BASE_CIDR","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_DEFAULT_ADDRESS_POOL_SIZE","EnvType":"int","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_IGNORE_DOCKER_CACHE","EnvType":"bool","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_LOGS_KEY_PREFIX","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_RUNNER_DOCKER_MTU_VALUE","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_SUCCESS_AUTO_TRIGGER_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_VOLUME_MOUNTS_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_WORKFLOW_EXECUTOR_TYPE","EnvType":"","EnvValue":"AWF","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_ARTIFACT_KEY_LOCATION","EnvType":"string","EnvValue":"arsenal-v1/ci-artifacts","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_BUILD_LOGS_BUCKET","EnvType":"string","EnvValue":"devtron-pro-ci-logs","EnvDescription
":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_BUILD_LOGS_KEY_PREFIX","EnvType":"string","EnvValue":"arsenal-v1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CACHE_BUCKET","EnvType":"string","EnvValue":"ci-caching","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CACHE_BUCKET_REGION","EnvType":"string","EnvValue":"us-east-2","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_ARTIFACT_KEY_LOCATION","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_LOGS_BUCKET_REGION","EnvType":"string","EnvValue":"us-east-2","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_NAMESPACE","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_TIMEOUT","EnvType":"int64","EnvValue":"3600","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CI_IMAGE","EnvType":"string","EnvValue":"686244538589.dkr.ecr.us-east-2.amazonaws.com/cirunner:47","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtron-ci","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_TARGET_PLATFORM","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DOCKER_BUILD_CACHE_PATH","EnvType":"string","EnvValue":"/var/lib/docker","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENABLE_BUILD_CONTEXT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENABLE_WORKFLOW_EXECUTION_STAGE","EnvType":"bool","EnvValue":"true","EnvDescription":"if enabled then we will display build stages separately for CI/Job/Pre-Post CD","Example":"true","Deprecated":"false"},{"Env":"EXTERNAL_BLOB_STORAGE_CM_NAME","EnvType":"string","EnvValue":"blob-storage-cm","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_BLOB_STORAGE_SECRET_NAME","EnvType":"string","EnvValue":"blob-storage-secret","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"dedicated","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"ci","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_API_SECRET","EnvType":"string","EnvValue":"devtroncd-secret","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_PAYLOAD","EnvType":"string","EnvValue":"{\"ciProjectDetails\":[{\"gitRepository\":\"https://github.com/vikram1601/getting-started-nodejs.git\",\"checkoutPath\":\"./abc\",\"commitHash\":\"239077135f8cdeeccb7857e2851348f558cb53d3\",\"commitTime\":\"2022-10-30T20:00:00\",\"branch\":\"master\",\"message\":\"Update README.md\",\"author\":\"User Name 
\"}],\"dockerImage\":\"445808685819.dkr.ecr.us-east-2.amazonaws.com/orch:23907713-2\"}","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_WEB_HOOK_URL","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IGNORE_CM_CS_IN_CI_JOB","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IMAGE_RETRY_COUNT","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IMAGE_RETRY_INTERVAL","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCANNER_ENDPOINT","EnvType":"string","EnvValue":"http://image-scanner-new-demo-devtroncd-service.devtroncd:80","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCAN_MAX_RETRIES","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCAN_RETRY_DELAY","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IN_APP_LOGGING_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MAX_CD_WORKFLOW_RUNNER_RETRIES","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MAX_CI_WORKFLOW_RETRIES","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MODE","EnvType":"string","EnvValue":"DEV","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_SERVER_HOST","EnvType":"string","EnvValue":"localhost:4222","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ORCH_HOST","EnvType":"string","EnvValue":"http://devtroncd-orchestrator-service-prod.devtroncd/webhook/msg/nats","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ORCH_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PRE_CI_CACHE_PATH","EnvType":"string","EnvValue":"/devtroncd-cache","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SHOW_DOCKER_BUILD_ARGS","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SKIP_CI_JOB_BUILD_CACHE_PUSH_PULL","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SKIP_CREATING_ECR_REPO","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TERMINATION_GRACE_PERIOD_SECS","EnvType":"int","EnvValue":"180","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_ARTIFACT_LISTING_QUERY_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_BLOB_STORAGE_CONFIG_IN_CD_WORKFLOW","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_BLOB_STORAGE_CONFIG_IN_CI_WORKFLOW","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_BUILDX","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_DOCKER_API_TO_GET_DIGEST","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_EXTERNAL_NODE","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_IMAGE_TAG_FROM_GIT_PROVIDER_FOR_TAG_BASED_BUILD","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"WF_CONTROLLER_INSTANCE_ID","EnvType":"string","EnvValue":"devtron-runner","En
vDescription":"","Example":"","Deprecated":"false"},{"Env":"WORKFLOW_CACHE_CONFIG","EnvType":"string","EnvValue":"{}","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"WORKFLOW_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"ci-runner","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"DEVTRON","Fields":[{"Env":"-","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_IMAGE","EnvType":"string","EnvValue":"quay.io/devtron/chart-sync:1227622d-132-3775","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_JOB_RESOURCES_OBJ","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"chart-sync","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_AUTO_SYNC_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_GIT_COMMIT_RETRY_COUNT_ON_CONFLICT","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_GIT_COMMIT_RETRY_DELAY_ON_CONFLICT","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_REPO_REGISTER_RETRY_COUNT","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_REPO_REGISTER_RETRY_DELAY","EnvType":"int","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ASYNC_BUILDX_CACHE_EXPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BATCH_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BUILDX_CACHE_MODE_MIN","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_PORT","EnvType":"string","EnvValue":"8000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CExpirationTime","EnvType":"int","EnvValue":"600","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_TRIGGER_CRON_TIME","EnvType":"int","EnvValue":"2","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_WORKFLOW_STATUS_UPDATE_CRON","EnvType":"string","EnvValue":"*/5 * * * 
*","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CLI_CMD_TIMEOUT_GLOBAL_SECONDS","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CLUSTER_STATUS_CRON_TIME","EnvType":"int","EnvValue":"15","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CONSUMER_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_LOG_TIME_LIMIT","EnvType":"int64","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_TIMEOUT","EnvType":"float64","EnvValue":"3600","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_BOM_URL","EnvType":"string","EnvValue":"https://raw.githubusercontent.com/devtron-labs/devtron/%s/charts/devtron/devtron-bom.yaml","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_DEX_SECRET_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_CHART_NAME","EnvType":"string","EnvValue":"devtron-operator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_NAME","EnvType":"string","EnvValue":"devtron","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_REPO_NAME","EnvType":"string","EnvValue":"devtron","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_REPO_URL","EnvType":"string","EnvValue":"https://helm.devtron.ai","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLATION_TYPE","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_MODULES_IDENTIFIER_IN_HELM_VALUES","EnvType":"string","EnvValue":"installer.modules","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_SECRET_NAME","EnvType":"string","EnvValue":"devtron-secret","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_VERSION_IDENTIFIER_IN_HELM_VALUES","EnvType":"string","EnvValue":"installer.release","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_CID","EnvType":"string","EnvValue":"example-app","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_CLIENT_ID","EnvType":"string","EnvValue":"argo-cd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_CSTOREKEY","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_JWTKEY","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_RURL","EnvType":"string","EnvValue":"http://127.0.0.1:8080/callback","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_SECRET","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_URL","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ECR_REPO_NAME_PREFIX","EnvType":"string","EnvValue":"test/","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENABLE_ASYNC_ARGO_CD_INSTALL_DEVTRON_CHART","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENABLE_ASYNC_INSTALL_DEVTRON_CHART","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":
"","Deprecated":"false"},{"Env":"EPHEMERAL_SERVER_VERSION_REGEX","EnvType":"string","EnvValue":"v[1-9]\\.\\b(2[3-9]\\|[3-9][0-9])\\b.*","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EVENT_URL","EnvType":"string","EnvValue":"http://localhost:3000/notify","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXECUTE_WIRE_NIL_CHECKER","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXPOSE_CI_METRICS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"FEATURE_RESTART_WORKLOAD_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"FEATURE_RESTART_WORKLOAD_WORKER_POOL_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"FORCE_SECURITY_SCANNING","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITOPS_REPO_PREFIX","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GO_RUNTIME_ENV","EnvType":"string","EnvValue":"production","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_ORG_ID","EnvType":"int","EnvValue":"2","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_PASSWORD","EnvType":"string","EnvValue":"prom-operator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_PORT","EnvType":"string","EnvValue":"8090","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_URL","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_USERNAME","EnvType":"string","EnvValue":"admin","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"HIDE_IMAGE_TAGGING_HARD_DELETE","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IGNORE_AUTOCOMPLETE_AUTH_CHECK","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_GROUP_NAME","EnvType":"string","EnvValue":"installer.devtron.ai","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_RESOURCE","EnvType":"string","EnvValue":"installers","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_VERSION","EnvType":"string","EnvValue":"v1alpha1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IS_INTERNAL_USE","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"JwtExpirationTime","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_CLIENT_MAX_IDLE_CONNS_PER_HOST","EnvType":"int","EnvValue":"25","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_IDLE_CONN_TIMEOUT","EnvType":"int","EnvValue":"300","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_KEEPALIVE","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_TIMEOUT","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_
TLS_HANDSHAKE_TIMEOUT","EnvType":"int","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_MAX_RECEIVE_MSG_SIZE","EnvType":"int","EnvValue":"20","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_MAX_SEND_MSG_SIZE","EnvType":"int","EnvValue":"4","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LENS_TIMEOUT","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LENS_URL","EnvType":"string","EnvValue":"http://lens-milandevtron-service:80","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LIMIT_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LIMIT_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LOGGER_DEV_MODE","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LOG_LEVEL","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MAX_SESSION_PER_USER","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MODULE_METADATA_API_URL","EnvType":"string","EnvValue":"https://api.devtron.ai/module?name=%s","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MODULE_STATUS_HANDLING_CRON_DURATION_MIN","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_ACK_WAIT_IN_SECS","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_BUFFER_SIZE","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_MAX_AGE","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_PROCESSING_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_REPLICAS","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NOTIFICATION_MEDIUM","EnvType":"NotificationMedium","EnvValue":"rest","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"OTEL_COLLECTOR_URL","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PARALLELISM_LIMIT_FOR_TAG_PROCESSING","EnvType":"int","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_EXPORT_PROM_METRICS","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_FAILURE_QUERIES","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_QUERY","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_SLOW_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_QUERY_DUR_THRESHOLD","EnvType":"int64","EnvValue":"5000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PLUGIN_NAME","EnvType":"string","EnvValue":"Pull images from container 
repository","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PROPAGATE_EXTRA_LABELS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PROXY_SERVICE_CONFIG","EnvType":"string","EnvValue":"{}","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REQ_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REQ_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RESTRICT_TERMINAL_ACCESS_FOR_NON_SUPER_USER","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RUNTIME_CONFIG_LOCAL_DEV","EnvType":"LocalDevMode","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RUN_HELM_INSTALL_IN_ASYNC_MODE_HELM_APPS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_FORMAT","EnvType":"string","EnvValue":"@{{%s}}","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_HANDLE_PRIMITIVES","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_NAME_REGEX","EnvType":"string","EnvValue":"^[a-zA-Z][a-zA-Z0-9_-]{0,62}[a-zA-Z0-9]$","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SHOULD_CHECK_NAMESPACE_ON_CLONE","EnvType":"bool","EnvValue":"false","EnvDescription":"should we check if namespace exists or not while cloning app","Example":"","Deprecated":"false"},{"Env":"SOCKET_DISCONNECT_DELAY_SECONDS","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SOCKET_HEARTBEAT_SECONDS","EnvType":"int","EnvValue":"25","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"STREAM_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SYSTEM_VAR_PREFIX","EnvType":"string","EnvValue":"DEVTRON_","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"default","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_INACTIVE_DURATION_IN_MINS","EnvType":"int","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_STATUS_SYNC_In_SECS","EnvType":"int","EnvValue":"600","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_APP","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_LOG_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_PASSWORD","EnvType":"string","EnvValue":"postgrespw","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_PORT","EnvType":"string","EnvValue":"55000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_USER","EnvType":"string","EnvValue":"postgres","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TIMEOUT_FOR_FAILED_CI_BUILD","EnvType":"string","EnvValue":"15","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TIMEOUT
_IN_SECONDS","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USER_SESSION_DURATION_SECONDS","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_ARTIFACT_LISTING_API_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_CUSTOM_HTTP_TRANSPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_DEPLOYMENT_CONFIG_DATA","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_GIT_CLI","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_RBAC_CREATION_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"VARIABLE_CACHE_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"VARIABLE_EXPRESSION_REGEX","EnvType":"string","EnvValue":"@{{([^}]+)}}","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"WEBHOOK_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"GITOPS","Fields":[{"Env":"ACD_CM","EnvType":"string","EnvValue":"argocd-cm","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ACD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ACD_PASSWORD","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ACD_USERNAME","EnvType":"string","EnvValue":"admin","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITOPS_SECRET_NAME","EnvType":"string","EnvValue":"devtron-gitops-secret","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RESOURCE_LIST_FOR_REPLICAS","EnvType":"string","EnvValue":"Deployment,Rollout,StatefulSet,ReplicaSet","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RESOURCE_LIST_FOR_REPLICAS_BATCH_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"INFRA_SETUP","Fields":[{"Env":"DASHBOARD_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DASHBOARD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DASHBOARD_PORT","EnvType":"string","EnvValue":"3000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_HOST","EnvType":"string","EnvValue":"http://localhost","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_PORT","EnvType":"string","EnvValue":"5556","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_PROTOCOL","EnvType":"string","EnvValue":"REST","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_TIMEOUT","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_URL","EnvType":"string","EnvValue":"127.0.0.1:7070","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"HELM_CLIENT_URL","EnvType":"string","EnvValue":"127.0.0.1:50051","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"POSTGRES","Fields":[{"Env":"APP","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"Application 
name","Example":"","Deprecated":"false"},{"Env":"CASBIN_DATABASE","EnvType":"string","EnvValue":"casbin","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"address of postgres service","Example":"postgresql-postgresql.devtroncd","Deprecated":"false"},{"Env":"PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"postgres database to be made connection with","Example":"orchestrator, casbin, git_sensor, lens","Deprecated":"false"},{"Env":"PG_PASSWORD","EnvType":"string","EnvValue":"{password}","EnvDescription":"password for postgres, associated with PG_USER","Example":"confidential ;)","Deprecated":"false"},{"Env":"PG_PORT","EnvType":"string","EnvValue":"5432","EnvDescription":"port of postgresql service","Example":"5432","Deprecated":"false"},{"Env":"PG_READ_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_USER","EnvType":"string","EnvValue":"postgres","EnvDescription":"user for postgres","Example":"postgres","Deprecated":"false"},{"Env":"PG_WRITE_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"RBAC","Fields":[{"Env":"ENFORCER_CACHE","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENFORCER_CACHE_EXPIRATION_IN_SEC","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENFORCER_MAX_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_CASBIN_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"}]}] \ No newline at end of file +[{"Category":"CD","Fields":[{"Env":"ARGO_APP_MANUAL_SYNC_TIME","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_HELM_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_PIPELINE_STATUS_TIMEOUT_DURATION","EnvType":"string","EnvValue":"20","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEPLOY_STATUS_CRON_GET_PIPELINE_DEPLOYED_WITHIN_HOURS","EnvType":"int","EnvValue":"12","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_CHART_ARGO_CD_INSTALL_REQUEST_TIMEOUT","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_CHART_INSTALL_REQUEST_TIMEOUT","EnvType":"int","EnvValue":"6","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXPOSE_CD_METRICS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"FEATURE_MIGRATE_ARGOCD_APPLICATION_ENABLE","EnvType":"bool","EnvValue":"false","EnvDescription":"enable migration of external argocd application to devtron pipeline","Example":"","Deprecated":"false"},{"Env":"HELM_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME","EnvType":"string","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IS_INTERNAL_USE","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MIGRATE_DEPLOYMENT_CONFIG_DATA","EnvType":"bool","EnvValue":"false","EnvDescription":"migrate deployment config data from charts table to deployment_config 
table","Example":"","Deprecated":"false"},{"Env":"PIPELINE_DEGRADED_TIME","EnvType":"string","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_DEVTRON_APP","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_EXTERNAL_HELM_APP","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_HELM_APP","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RUN_HELM_INSTALL_IN_ASYNC_MODE_HELM_APPS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SHOULD_CHECK_NAMESPACE_ON_CLONE","EnvType":"bool","EnvValue":"false","EnvDescription":"should we check if namespace exists or not while cloning app","Example":"","Deprecated":"false"},{"Env":"USE_DEPLOYMENT_CONFIG_DATA","EnvType":"bool","EnvValue":"false","EnvDescription":"use deployment config data from deployment_config table","Example":"","Deprecated":"true"}]},{"Category":"CI_RUNNER","Fields":[{"Env":"AZURE_ACCOUNT_KEY","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"AZURE_ACCOUNT_NAME","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"AZURE_BLOB_CONTAINER_CI_CACHE","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"AZURE_BLOB_CONTAINER_CI_LOG","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"AZURE_GATEWAY_CONNECTION_INSECURE","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"AZURE_GATEWAY_URL","EnvType":"string","EnvValue":"http://devtron-minio.devtroncd:9000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BASE_LOG_LOCATION_PATH","EnvType":"string","EnvValue":"/home/devtron/","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_GCP_CREDENTIALS_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_PROVIDER","EnvType":"","EnvValue":"S3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ACCESS_KEY","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_BUCKET_VERSIONED","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ENDPOINT","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ENDPOINT_INSECURE","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_SECRET_KEY","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BUILDX_CACHE_PATH","EnvType":"string","EnvValue":"/var/lib/devtron/buildx","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BUILDX_K8S_DRIVER_OPTIONS","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BUILDX_PROVENANCE_MODE","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BUILD_LOG_TTL_VALUE_IN_SECS","EnvType":"int","EnvValue":"3600","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CACHE_LIMIT","EnvType":"int64","EnvValue":"5000000000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_DEFAULT_ADDRESS_POOL_BASE_CIDR
","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_DEFAULT_ADDRESS_POOL_SIZE","EnvType":"int","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_LIMIT_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_LIMIT_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"dedicated","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"ci","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_REQ_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_REQ_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_WORKFLOW_EXECUTOR_TYPE","EnvType":"","EnvValue":"AWF","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_WORKFLOW_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"cd-runner","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_DEFAULT_ADDRESS_POOL_BASE_CIDR","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_DEFAULT_ADDRESS_POOL_SIZE","EnvType":"int","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_IGNORE_DOCKER_CACHE","EnvType":"bool","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_LOGS_KEY_PREFIX","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_RUNNER_DOCKER_MTU_VALUE","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_SUCCESS_AUTO_TRIGGER_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_VOLUME_MOUNTS_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_WORKFLOW_EXECUTOR_TYPE","EnvType":"","EnvValue":"AWF","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_ARTIFACT_KEY_LOCATION","EnvType":"string","EnvValue":"arsenal-v1/ci-artifacts","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_BUILD_LOGS_BUCKET","EnvType":"string","EnvValue":"devtron-pro-ci-logs","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_BUILD_LOGS_KEY_PREFIX","EnvType":"string","EnvValue":"arsenal-v1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CACHE_BUCKET","EnvType":"string","EnvValue":"ci-caching","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CACHE_BUCKET_REGION","EnvType":"string","EnvValue":"us-east-2","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_ARTIFACT_KEY_LOCATION","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_LOGS_BUCKET_REGION","EnvType":"string","EnvValue":"us-east-2","EnvDescription":"","Example":"","Depreca
ted":"false"},{"Env":"DEFAULT_CD_NAMESPACE","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_TIMEOUT","EnvType":"int64","EnvValue":"3600","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CI_IMAGE","EnvType":"string","EnvValue":"686244538589.dkr.ecr.us-east-2.amazonaws.com/cirunner:47","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtron-ci","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_TARGET_PLATFORM","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DOCKER_BUILD_CACHE_PATH","EnvType":"string","EnvValue":"/var/lib/docker","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENABLE_BUILD_CONTEXT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENABLE_WORKFLOW_EXECUTION_STAGE","EnvType":"bool","EnvValue":"true","EnvDescription":"if enabled then we will display build stages separately for CI/Job/Pre-Post CD","Example":"true","Deprecated":"false"},{"Env":"EXTERNAL_BLOB_STORAGE_CM_NAME","EnvType":"string","EnvValue":"blob-storage-cm","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_BLOB_STORAGE_SECRET_NAME","EnvType":"string","EnvValue":"blob-storage-secret","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"dedicated","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"ci","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_API_SECRET","EnvType":"string","EnvValue":"devtroncd-secret","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_PAYLOAD","EnvType":"string","EnvValue":"{\"ciProjectDetails\":[{\"gitRepository\":\"https://github.com/vikram1601/getting-started-nodejs.git\",\"checkoutPath\":\"./abc\",\"commitHash\":\"239077135f8cdeeccb7857e2851348f558cb53d3\",\"commitTime\":\"2022-10-30T20:00:00\",\"branch\":\"master\",\"message\":\"Update README.md\",\"author\":\"User Name 
\"}],\"dockerImage\":\"445808685819.dkr.ecr.us-east-2.amazonaws.com/orch:23907713-2\"}","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_WEB_HOOK_URL","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IGNORE_CM_CS_IN_CI_JOB","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IMAGE_RETRY_COUNT","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IMAGE_RETRY_INTERVAL","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCANNER_ENDPOINT","EnvType":"string","EnvValue":"http://image-scanner-new-demo-devtroncd-service.devtroncd:80","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCAN_MAX_RETRIES","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCAN_RETRY_DELAY","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IN_APP_LOGGING_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MAX_CD_WORKFLOW_RUNNER_RETRIES","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MAX_CI_WORKFLOW_RETRIES","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MODE","EnvType":"string","EnvValue":"DEV","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_SERVER_HOST","EnvType":"string","EnvValue":"localhost:4222","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ORCH_HOST","EnvType":"string","EnvValue":"http://devtroncd-orchestrator-service-prod.devtroncd/webhook/msg/nats","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ORCH_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PRE_CI_CACHE_PATH","EnvType":"string","EnvValue":"/devtroncd-cache","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SHOW_DOCKER_BUILD_ARGS","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SKIP_CI_JOB_BUILD_CACHE_PUSH_PULL","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SKIP_CREATING_ECR_REPO","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TERMINATION_GRACE_PERIOD_SECS","EnvType":"int","EnvValue":"180","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_ARTIFACT_LISTING_QUERY_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_BLOB_STORAGE_CONFIG_IN_CD_WORKFLOW","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_BLOB_STORAGE_CONFIG_IN_CI_WORKFLOW","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_BUILDX","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_DOCKER_API_TO_GET_DIGEST","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_EXTERNAL_NODE","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_IMAGE_TAG_FROM_GIT_PROVIDER_FOR_TAG_BASED_BUILD","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"WF_CONTROLLER_INSTANCE_ID","EnvType":"string","EnvValue":"devtron-runner","En
vDescription":"","Example":"","Deprecated":"false"},{"Env":"WORKFLOW_CACHE_CONFIG","EnvType":"string","EnvValue":"{}","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"WORKFLOW_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"ci-runner","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"DEVTRON","Fields":[{"Env":"-","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_IMAGE","EnvType":"string","EnvValue":"quay.io/devtron/chart-sync:1227622d-132-3775","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_JOB_RESOURCES_OBJ","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"chart-sync","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_AUTO_SYNC_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_GIT_COMMIT_RETRY_COUNT_ON_CONFLICT","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_GIT_COMMIT_RETRY_DELAY_ON_CONFLICT","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_REPO_REGISTER_RETRY_COUNT","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_REPO_REGISTER_RETRY_DELAY","EnvType":"int","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ASYNC_BUILDX_CACHE_EXPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BATCH_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"BUILDX_CACHE_MODE_MIN","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_PORT","EnvType":"string","EnvValue":"8000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CExpirationTime","EnvType":"int","EnvValue":"600","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_TRIGGER_CRON_TIME","EnvType":"int","EnvValue":"2","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CI_WORKFLOW_STATUS_UPDATE_CRON","EnvType":"string","EnvValue":"*/5 * * * 
*","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CLI_CMD_TIMEOUT_GLOBAL_SECONDS","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CLUSTER_STATUS_CRON_TIME","EnvType":"int","EnvValue":"15","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CONSUMER_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_LOG_TIME_LIMIT","EnvType":"int64","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_TIMEOUT","EnvType":"float64","EnvValue":"3600","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_BOM_URL","EnvType":"string","EnvValue":"https://raw.githubusercontent.com/devtron-labs/devtron/%s/charts/devtron/devtron-bom.yaml","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_DEX_SECRET_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_CHART_NAME","EnvType":"string","EnvValue":"devtron-operator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_NAME","EnvType":"string","EnvValue":"devtron","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_REPO_NAME","EnvType":"string","EnvValue":"devtron","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_REPO_URL","EnvType":"string","EnvValue":"https://helm.devtron.ai","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLATION_TYPE","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_MODULES_IDENTIFIER_IN_HELM_VALUES","EnvType":"string","EnvValue":"installer.modules","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_SECRET_NAME","EnvType":"string","EnvValue":"devtron-secret","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_VERSION_IDENTIFIER_IN_HELM_VALUES","EnvType":"string","EnvValue":"installer.release","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_CID","EnvType":"string","EnvValue":"example-app","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_CLIENT_ID","EnvType":"string","EnvValue":"argo-cd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_CSTOREKEY","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_JWTKEY","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_RURL","EnvType":"string","EnvValue":"http://127.0.0.1:8080/callback","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_SECRET","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_URL","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ECR_REPO_NAME_PREFIX","EnvType":"string","EnvValue":"test/","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENABLE_ASYNC_ARGO_CD_INSTALL_DEVTRON_CHART","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENABLE_ASYNC_INSTALL_DEVTRON_CHART","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":
"","Deprecated":"false"},{"Env":"EPHEMERAL_SERVER_VERSION_REGEX","EnvType":"string","EnvValue":"v[1-9]\\.\\b(2[3-9]\\|[3-9][0-9])\\b.*","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EVENT_URL","EnvType":"string","EnvValue":"http://localhost:3000/notify","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXECUTE_WIRE_NIL_CHECKER","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXPOSE_CI_METRICS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"FEATURE_RESTART_WORKLOAD_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"FEATURE_RESTART_WORKLOAD_WORKER_POOL_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"FORCE_SECURITY_SCANNING","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITOPS_REPO_PREFIX","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GO_RUNTIME_ENV","EnvType":"string","EnvValue":"production","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_ORG_ID","EnvType":"int","EnvValue":"2","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_PASSWORD","EnvType":"string","EnvValue":"prom-operator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_PORT","EnvType":"string","EnvValue":"8090","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_URL","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_USERNAME","EnvType":"string","EnvValue":"admin","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"HIDE_IMAGE_TAGGING_HARD_DELETE","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"IGNORE_AUTOCOMPLETE_AUTH_CHECK","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_GROUP_NAME","EnvType":"string","EnvValue":"installer.devtron.ai","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_RESOURCE","EnvType":"string","EnvValue":"installers","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_VERSION","EnvType":"string","EnvValue":"v1alpha1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"JwtExpirationTime","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_CLIENT_MAX_IDLE_CONNS_PER_HOST","EnvType":"int","EnvValue":"25","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_IDLE_CONN_TIMEOUT","EnvType":"int","EnvValue":"300","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_KEEPALIVE","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_TIMEOUT","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TLS_HANDSHAKE_TIMEOUT","EnvType":"int","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env
":"KUBELINK_GRPC_MAX_RECEIVE_MSG_SIZE","EnvType":"int","EnvValue":"20","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_MAX_SEND_MSG_SIZE","EnvType":"int","EnvValue":"4","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LENS_TIMEOUT","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LENS_URL","EnvType":"string","EnvValue":"http://lens-milandevtron-service:80","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LIMIT_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LIMIT_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LOGGER_DEV_MODE","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LOG_LEVEL","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MAX_SESSION_PER_USER","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MODULE_METADATA_API_URL","EnvType":"string","EnvValue":"https://api.devtron.ai/module?name=%s","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MODULE_STATUS_HANDLING_CRON_DURATION_MIN","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_ACK_WAIT_IN_SECS","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_BUFFER_SIZE","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_MAX_AGE","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_PROCESSING_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_REPLICAS","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NOTIFICATION_MEDIUM","EnvType":"NotificationMedium","EnvValue":"rest","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"OTEL_COLLECTOR_URL","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PARALLELISM_LIMIT_FOR_TAG_PROCESSING","EnvType":"int","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_EXPORT_PROM_METRICS","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_FAILURE_QUERIES","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_QUERY","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_SLOW_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_QUERY_DUR_THRESHOLD","EnvType":"int64","EnvValue":"5000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PLUGIN_NAME","EnvType":"string","EnvValue":"Pull images from container 
repository","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PROPAGATE_EXTRA_LABELS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PROXY_SERVICE_CONFIG","EnvType":"string","EnvValue":"{}","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REQ_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REQ_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RESTRICT_TERMINAL_ACCESS_FOR_NON_SUPER_USER","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RUNTIME_CONFIG_LOCAL_DEV","EnvType":"LocalDevMode","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_FORMAT","EnvType":"string","EnvValue":"@{{%s}}","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_HANDLE_PRIMITIVES","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_NAME_REGEX","EnvType":"string","EnvValue":"^[a-zA-Z][a-zA-Z0-9_-]{0,62}[a-zA-Z0-9]$","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SOCKET_DISCONNECT_DELAY_SECONDS","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SOCKET_HEARTBEAT_SECONDS","EnvType":"int","EnvValue":"25","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"STREAM_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SYSTEM_VAR_PREFIX","EnvType":"string","EnvValue":"DEVTRON_","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"default","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_INACTIVE_DURATION_IN_MINS","EnvType":"int","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_STATUS_SYNC_In_SECS","EnvType":"int","EnvValue":"600","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_APP","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_LOG_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_PASSWORD","EnvType":"string","EnvValue":"postgrespw","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_PORT","EnvType":"string","EnvValue":"55000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_USER","EnvType":"string","EnvValue":"postgres","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TIMEOUT_FOR_FAILED_CI_BUILD","EnvType":"string","EnvValue":"15","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TIMEOUT_IN_SECONDS","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USER_SESSION_DURATION_SECONDS","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_ARTIFACT_LISTING_API_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"",
"Deprecated":"false"},{"Env":"USE_CUSTOM_HTTP_TRANSPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_GIT_CLI","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_RBAC_CREATION_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"VARIABLE_CACHE_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"VARIABLE_EXPRESSION_REGEX","EnvType":"string","EnvValue":"@{{([^}]+)}}","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"WEBHOOK_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"GITOPS","Fields":[{"Env":"ACD_CM","EnvType":"string","EnvValue":"argocd-cm","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ACD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ACD_PASSWORD","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ACD_USERNAME","EnvType":"string","EnvValue":"admin","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITOPS_SECRET_NAME","EnvType":"string","EnvValue":"devtron-gitops-secret","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RESOURCE_LIST_FOR_REPLICAS","EnvType":"string","EnvValue":"Deployment,Rollout,StatefulSet,ReplicaSet","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RESOURCE_LIST_FOR_REPLICAS_BATCH_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"INFRA_SETUP","Fields":[{"Env":"DASHBOARD_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DASHBOARD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DASHBOARD_PORT","EnvType":"string","EnvValue":"3000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_HOST","EnvType":"string","EnvValue":"http://localhost","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_PORT","EnvType":"string","EnvValue":"5556","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_PROTOCOL","EnvType":"string","EnvValue":"REST","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_TIMEOUT","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_URL","EnvType":"string","EnvValue":"127.0.0.1:7070","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"HELM_CLIENT_URL","EnvType":"string","EnvValue":"127.0.0.1:50051","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"POSTGRES","Fields":[{"Env":"APP","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"Application name","Example":"","Deprecated":"false"},{"Env":"CASBIN_DATABASE","EnvType":"string","EnvValue":"casbin","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"address of postgres service","Example":"postgresql-postgresql.devtroncd","Deprecated":"false"},{"Env":"PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"postgres database to be made connection with","Example":"orchestrator, casbin, git_sensor, lens","Deprecated":"false"},{"Env":"PG_PASSWORD","EnvType":"string","EnvValue":"{password}","EnvDescription":"password for 
postgres, associated with PG_USER","Example":"confidential ;)","Deprecated":"false"},{"Env":"PG_PORT","EnvType":"string","EnvValue":"5432","EnvDescription":"port of postgresql service","Example":"5432","Deprecated":"false"},{"Env":"PG_READ_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_USER","EnvType":"string","EnvValue":"postgres","EnvDescription":"user for postgres","Example":"postgres","Deprecated":"false"},{"Env":"PG_WRITE_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"RBAC","Fields":[{"Env":"ENFORCER_CACHE","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENFORCER_CACHE_EXPIRATION_IN_SEC","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENFORCER_MAX_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_CASBIN_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"}]}] \ No newline at end of file diff --git a/env_gen.md b/env_gen.md index 8e7461ef9b..454ac52c8d 100644 --- a/env_gen.md +++ b/env_gen.md @@ -11,11 +11,17 @@ | DEVTRON_CHART_ARGO_CD_INSTALL_REQUEST_TIMEOUT | int |1 | | | false | | DEVTRON_CHART_INSTALL_REQUEST_TIMEOUT | int |6 | | | false | | EXPOSE_CD_METRICS | bool |false | | | false | + | FEATURE_MIGRATE_ARGOCD_APPLICATION_ENABLE | bool |false | enable migration of external argocd application to devtron pipeline | | false | | HELM_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME | string |120 | | | false | + | IS_INTERNAL_USE | bool |true | | | false | + | MIGRATE_DEPLOYMENT_CONFIG_DATA | bool |false | migrate deployment config data from charts table to deployment_config table | | false | | PIPELINE_DEGRADED_TIME | string |10 | | | false | | REVISION_HISTORY_LIMIT_DEVTRON_APP | int |1 | | | false | | REVISION_HISTORY_LIMIT_EXTERNAL_HELM_APP | int |0 | | | false | | REVISION_HISTORY_LIMIT_HELM_APP | int |1 | | | false | + | RUN_HELM_INSTALL_IN_ASYNC_MODE_HELM_APPS | bool |false | | | false | + | SHOULD_CHECK_NAMESPACE_ON_CLONE | bool |false | should we check if namespace exists or not while cloning app | | false | + | USE_DEPLOYMENT_CONFIG_DATA | bool |false | use deployment config data from deployment_config table | | true | ## CI_RUNNER Related Environment Variables @@ -186,7 +192,6 @@ | INSTALLER_CRD_OBJECT_GROUP_NAME | string |installer.devtron.ai | | | false | | INSTALLER_CRD_OBJECT_RESOURCE | string |installers | | | false | | INSTALLER_CRD_OBJECT_VERSION | string |v1alpha1 | | | false | - | IS_INTERNAL_USE | bool |true | | | false | | JwtExpirationTime | int |120 | | | false | | K8s_CLIENT_MAX_IDLE_CONNS_PER_HOST | int |25 | | | false | | K8s_TCP_IDLE_CONN_TIMEOUT | int |300 | | | false | @@ -224,12 +229,10 @@ | REQ_CI_MEM | string |3G | | | false | | RESTRICT_TERMINAL_ACCESS_FOR_NON_SUPER_USER | bool |false | | | false | | RUNTIME_CONFIG_LOCAL_DEV | LocalDevMode |true | | | false | - | RUN_HELM_INSTALL_IN_ASYNC_MODE_HELM_APPS | bool |false | | | false | | SCOPED_VARIABLE_ENABLED | bool |false | | | false | | SCOPED_VARIABLE_FORMAT | string |@{{%s}} | | | false | | SCOPED_VARIABLE_HANDLE_PRIMITIVES | bool |false | | | false | | SCOPED_VARIABLE_NAME_REGEX | string |^[a-zA-Z][a-zA-Z0-9_-]{0,62}[a-zA-Z0-9]$ | | | false | - | SHOULD_CHECK_NAMESPACE_ON_CLONE | bool |false | should we check if namespace exists or not while cloning app | | false | | 
SOCKET_DISCONNECT_DELAY_SECONDS | int |5 | | | false | | SOCKET_HEARTBEAT_SECONDS | int |25 | | | false | | STREAM_CONFIG_JSON | string | | | | false | @@ -249,7 +252,6 @@ | USER_SESSION_DURATION_SECONDS | int |86400 | | | false | | USE_ARTIFACT_LISTING_API_V2 | bool |true | | | false | | USE_CUSTOM_HTTP_TRANSPORT | bool |false | | | false | - | USE_DEPLOYMENT_CONFIG_DATA | bool |false | | | false | | USE_GIT_CLI | bool |false | | | false | | USE_RBAC_CREATION_V2 | bool |true | | | false | | VARIABLE_CACHE_ENABLED | bool |true | | | false | diff --git a/go.mod b/go.mod index 643ca95118..d95f16d546 100644 --- a/go.mod +++ b/go.mod @@ -1,16 +1,16 @@ module github.com/devtron-labs/devtron -go 1.21 +go 1.21.0 toolchain go1.21.8 require ( github.com/Masterminds/semver v1.5.0 github.com/Pallinder/go-randomdata v1.2.0 - github.com/argoproj/argo-cd/v2 v2.9.21 - github.com/argoproj/argo-workflows/v3 v3.5.10 - github.com/argoproj/gitops-engine v0.7.1-0.20240718175351-6b2984ebc470 - github.com/aws/aws-sdk-go v1.44.317 + github.com/argoproj/argo-cd/v2 v2.12.10 + github.com/argoproj/argo-workflows/v3 v3.5.13 + github.com/argoproj/gitops-engine v0.7.1-0.20250129155113-faf5a4e5c37d + github.com/aws/aws-sdk-go v1.50.8 github.com/caarlos0/env v3.5.0+incompatible github.com/caarlos0/env/v6 v6.7.2 github.com/casbin/casbin v1.9.1 @@ -24,7 +24,7 @@ require ( github.com/devtron-labs/common-lib v0.18.1-0.20241001061923-eda545dc839e github.com/devtron-labs/go-bitbucket v0.9.60-beta github.com/devtron-labs/protos v0.0.3-0.20240802105333-92ee9bb85d80 - github.com/evanphx/json-patch v5.7.0+incompatible + github.com/evanphx/json-patch v5.9.0+incompatible github.com/gammazero/workerpool v1.1.3 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-git/go-billy/v5 v5.6.2 @@ -56,7 +56,7 @@ require ( github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 github.com/posthog/posthog-go v0.0.0-20210610161230-cd4408afb35a - github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/client_golang v1.18.0 github.com/robfig/cron/v3 v3.0.1 github.com/satori/go.uuid v1.2.0 github.com/stretchr/testify v1.10.0 @@ -67,12 +67,12 @@ require ( github.com/yannh/kubeconform v0.5.0 github.com/zclconf/go-cty v1.13.2 go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.44.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 - go.opentelemetry.io/otel v1.20.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 - go.opentelemetry.io/otel/sdk v1.20.0 - go.opentelemetry.io/otel/trace v1.20.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 + go.opentelemetry.io/otel v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 + go.opentelemetry.io/otel/sdk v1.21.0 + 
go.opentelemetry.io/otel/trace v1.21.0 go.uber.org/zap v1.21.0 golang.org/x/crypto v0.32.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 @@ -167,8 +167,8 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.11.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gorilla/securecookie v1.1.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect @@ -195,7 +195,6 @@ require ( github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/minio/highwayhash v1.0.2 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect @@ -217,9 +216,9 @@ require ( github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/redis/go-redis/v9 v9.0.5 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect @@ -242,7 +241,7 @@ require ( github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect - go.opentelemetry.io/otel/metric v1.20.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.10.0 // indirect @@ -255,7 +254,7 @@ require ( golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.23.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/api v0.126.0 // indirect + google.golang.org/api v0.132.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect @@ -284,10 +283,14 @@ require ( require github.com/docker/distribution v2.8.2+incompatible -require gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 
+require ( + github.com/dlclark/regexp2 v1.11.2 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect +) replace ( - github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.13 + github.com/argoproj/argo-workflows/v3 v3.5.13 => github.com/devtron-labs/argo-workflows/v3 v3.5.13 github.com/devtron-labs/authenticator => github.com/devtron-labs/devtron-services/authenticator v0.0.0-20250228031913-d6390cb9dcd0 github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20250228112747-a1b425d845a3 github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127 diff --git a/go.sum b/go.sum index ad745e1508..7639001fa4 100644 --- a/go.sum +++ b/go.sum @@ -687,18 +687,18 @@ github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/argoproj/argo-cd/v2 v2.9.21 h1:asVFgXfv0pvw7Q9STOhT75sWUU9cIKHRWWrAzJWmRgo= -github.com/argoproj/argo-cd/v2 v2.9.21/go.mod h1:V9EKQR1U5kJV/aLVRgUV46muOStnP6C5c4wTeT6nkoY= -github.com/argoproj/gitops-engine v0.7.1-0.20240718175351-6b2984ebc470 h1:RUo6je4n+FgNEkGsONhwxUtT67YqyEtrvMNd+t8pKSo= -github.com/argoproj/gitops-engine v0.7.1-0.20240718175351-6b2984ebc470/go.mod h1:xMIbuLg9Qj2e0egTy+8NcukbhRaVmWwK9vm3aAQZoi4= +github.com/argoproj/argo-cd/v2 v2.12.10 h1:Qe5cBSnGy0wXAVdMKH69gWZYZ1CwHHxSOdavE4t+sAg= +github.com/argoproj/argo-cd/v2 v2.12.10/go.mod h1:5kppi19e6lVQxlKSd1Cs7LorEZ6rID4nc06YWNHM+rg= +github.com/argoproj/gitops-engine v0.7.1-0.20250129155113-faf5a4e5c37d h1:LrHPuKm4rFfaVzNOqXhuoLNqe7DnhZ3d5pZA+k431Bo= +github.com/argoproj/gitops-engine v0.7.1-0.20250129155113-faf5a4e5c37d/go.mod h1:xMIbuLg9Qj2e0egTy+8NcukbhRaVmWwK9vm3aAQZoi4= github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e h1:kuLQvJqwwRMQTheT4MFyKVM8Txncu21CHT4yBWUl1Mk= github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e/go.mod h1:xBN5PLx2MoK63dmPfMo/PGBvd77K1Y0m/rzZOe4cs1s= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.44.290/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go v1.44.317 h1:+8XWrLmGMwPPXSRSLPzhgcGnzJ2mYkgkrcB9C/GnSOU= -github.com/aws/aws-sdk-go v1.44.317/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.50.8 
h1:gY0WoOW+/Wz6XmYSgDH9ge3wnAevYDSQWPxxJvqAkP4= +github.com/aws/aws-sdk-go v1.50.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -805,6 +805,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.11.2 h1:/u628IuisSTwri5/UKloiIsH8+qF2Pu7xEQX+yIKg68= +github.com/dlclark/regexp2 v1.11.2/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= @@ -840,8 +842,8 @@ github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6Ni github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= -github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= @@ -1086,8 +1088,9 @@ github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99 github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/enterprise-certificate-proxy 
v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -1101,8 +1104,9 @@ github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57Q github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= -github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -1288,8 +1292,8 @@ github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71 github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/microsoft/azure-devops-go-api/azuredevops v1.0.0-b5 h1:YH424zrwLTlyHSH/GzLMJeu5zhYVZSx5RQxGKm1h96s= github.com/microsoft/azure-devops-go-api/azuredevops v1.0.0-b5/go.mod h1:PoGiBqKSQK1vIfQ+yVaFcGjDySHvym6FM1cNYnwzbrY= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= @@ -1424,22 +1428,22 @@ github.com/posthog/posthog-go v0.0.0-20210610161230-cd4408afb35a h1:wm95cDvqeISA github.com/posthog/posthog-go v0.0.0-20210610161230-cd4408afb35a/go.mod h1:oa2sAs9tGai3VldabTV0eWejt/O4/OOD7azP8GaikqU= 
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
-github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
-github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
-github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
 github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
-github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
+github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
-github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/redis/go-redis/v9 v9.0.0-rc.4/go.mod h1:Vo3EsyWnicKnSKCA7HhgnvnyA74wOA69Cd2Meli5mmA=
 github.com/redis/go-redis/v9 v9.0.5 h1:CuQcn5HIEeK7BgElubPP8CGtE0KakrnbBSTLjathl5o=
@@ -1574,22 +1578,22 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
 go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.44.0 h1:QaNUlLvmettd1vnmFHrgBYQHearxWP3uO4h4F3pVtkM=
 go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.44.0/go.mod h1:cJu+5jZwoZfkBOECSFtBZK/O7h/pY5djn0fwnIGnQ4A=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
-go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
-go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0=
-go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA=
-go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM=
-go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM=
-go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0=
-go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
-go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU=
+go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
+go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
+go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
+go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
+go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
+go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
+go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
+go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
 go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
 go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
@@ -2170,8 +2174,9 @@ google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45
 google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E=
 google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms=
 google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4=
-google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o=
 google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
+google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc=
+google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
diff --git a/internal/constants/InternalErrorCode.go b/internal/constants/InternalErrorCode.go
index 85bc18ac9c..211392eb6a 100644
--- a/internal/constants/InternalErrorCode.go
+++ b/internal/constants/InternalErrorCode.go
@@ -18,15 +18,15 @@
 package constants
 
 import "fmt"
 
-/**
-  Cluster - 1000-1999
-  Environment - 2000-2999
-  Global Config - 3000-3999
-  Pipeline Config - 4000-4999
-  Pipeline - 5000-5999
-  User - 6000-6999
-  Other - 7000-7999
-*/
+// Error Codes Sequence
+
+// Cluster - 1000-1999
+// Environment - 2000-2999
+// Global Config - 3000-3999
+// Application - 4000-4999
+// Pipeline - 5000-5999
+// User - 6000-6999
+// Other - 7000-7999
 
 type ErrorCode struct {
 	Code string
@@ -38,7 +38,9 @@ func (code ErrorCode) UserMessage(params ...interface{}) string {
 }
 
 const (
-	//Cluster Errors
+	// Cluster Errors Start ------------------
+	// Sequence 1000-1999
+
 	ClusterCreateDBFailed   string = "1001"
 	ClusterCreateACDFailed  string = "1002"
 	ClusterDBRollbackFailed string = "1003"
@@ -47,17 +49,33 @@ const (
 	ClusterCreateBadRequestACD string = "1006"
 	ClusterUpdateBadRequestACD string = "1007"
 
-	//Environment Errors
+	// Cluster Errors End --------------------
+)
+
+const (
+	// Environment Errors Start --------------
+	// Sequence 2000-2999
+
 	EnvironmentCreateDBFailed          string = "2001"
 	EnvironmentUpdateDBFailed          string = "2002"
 	EnvironmentUpdateEnvOverrideFailed string = "2003"
 
-	//Global Config Errors Constants; use 3000
+	// Environment Errors End ----------------
+)
+
+const (
+	// Global Config Errors Start ------------
+	// Sequence 3000-3999
+
+	// Docker Registry Errors
+
 	DockerRegCreateFailedInDb   string = "3001"
 	DockerRegCreateFailedInGocd string = "3002"
 	DockerRegUpdateFailedInDb   string = "3003"
 	DockerRegUpdateFailedInGocd string = "3004"
 
+	// Git Provider Errors
+
 	GitProviderCreateFailedAlreadyExists string = "3005"
 	GitProviderCreateFailedInDb          string = "3006"
 	GitProviderUpdateProviderNotExists   string = "3007"
@@ -66,12 +84,43 @@ const (
 	DockerRegDeleteFailedInGocd       string = "3010"
 	GitProviderUpdateFailedInSync     string = "3011"
 	GitProviderUpdateRequestIsInvalid string = "3012"
-	// For conflicts use 900 series
+
+	// ---------------------------------------
+
+	// For Global Config conflicts use 3900 series
+
 	GitOpsConfigValidationConflict string = "3900"
 
+	// Global Config Errors End --------------
+)
+
+const (
+	// Application Errors Start --------------
+	// Sequence 4000-4999
+
+	// Application Errors End ----------------
+)
+
+const (
+	// Pipeline Errors Start -----------------
+	// Sequence 5000-5999
+
 	ChartCreatedAlreadyExists string = "5001"
 	ChartNameAlreadyReserved  string = "5002"
 
+	// Pipeline Config GitOps Config Errors
+	// Sequence 5100-5199
+
+	InvalidGitOpsRepoUrlForPipeline     string = "5101"
+	InvalidDeploymentAppTypeForPipeline string = "5102"
+
+	// Pipeline Errors End -------------------
+)
+
+const (
+	// User Errors Start ---------------------
+	// Sequence 6000-6999
+
 	UserCreateDBFailed     string = "6001"
 	UserCreatePolicyFailed string = "6002"
 	UserUpdateDBFailed     string = "6003"
@@ -81,6 +130,13 @@ const (
 	UserCreateFetchRoleFailed string = "6007"
 	UserUpdateFetchRoleFailed string = "6008"
 
+	// User Errors End -----------------------
+)
+
+const (
+	// Other Errors Start --------------------
+	// Sequence 7000-7999
+
 	AppDetailResourceTreeNotFound string = "7000"
 	HelmReleaseNotFound           string = "7001"
 
@@ -89,9 +145,41 @@ const (
 	GitHostCreateFailedAlreadyExists string = "9001"
 	GitHostCreateFailedInDb          string = "9002"
 
-	// feasibility errors
-	OperationPerformError string = "10001"
-	VulnerabilityFound    string = "10002"
+	// Other Errors End ----------------------
+)
+
+const (
+	// Feasibility Errors Start --------------
+	// Sequence 10000-10999
+
+	VulnerabilityFound                   string = "10001"
+	ApprovalNodeFail                     string = "10002"
+	FilteringConditionFail               string = "10003"
+	DeploymentWindowFail                 string = "10004"
+	PreCDDoesNotExists                   string = "10005"
+	PostCDDoesNotExists                  string = "10006"
+	ArtifactNotAvailable                 string = "10007"
+	DeploymentWindowByPassed             string = "10008"
+	MandatoryPluginNotAdded              string = "10009"
+	MandatoryTagNotAdded                 string = "10010"
+	SecurityScanFail                     string = "10011"
+	ApprovalConfigDependentActionFailure string = "10012"
+
+	// Feasibility Errors End ----------------
+)
+
+const (
+	// Not Processed Internal Errors Start ---
+	// Sequence 11000-11999
+
+	NotProcessed string = "11001"
+	NotExecuted  string = "11002"
+
+	// Not Processed Internal Errors End -----
+)
+
+const (
+	HttpStatusUnprocessableEntity = "422"
 )
 
 const (
@@ -107,4 +195,5 @@ const (
 	UnableToFetchResourceTreeErrMsg       = "unable to fetch resource tree"
 	UnableToFetchResourceTreeForAcdErrMsg = "app detail fetched, failed to get resource tree from acd"
 	CannotGetAppWithRefreshErrMsg         = "cannot get application with refresh"
+	NoDataFoundErrMsg                     = "no data found"
 )
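
Note: for context, a minimal sketch of how one of the new feasibility codes might surface to an API caller. The util.ApiError field names are assumed from their use elsewhere in the codebase, and the wrapper function below is illustrative only, not part of this change:

package example

import (
	"net/http"

	"github.com/devtron-labs/devtron/internal/constants"
	"github.com/devtron-labs/devtron/internal/util"
)

// vulnerabilityFoundErr builds an API error carrying feasibility code 10001;
// the 422 status mirrors the HttpStatusUnprocessableEntity constant above.
func vulnerabilityFoundErr() error {
	return &util.ApiError{
		HttpStatusCode: http.StatusUnprocessableEntity,
		Code:           constants.VulnerabilityFound,
		UserMessage:    "vulnerability found in the image, deployment is not allowed",
	}
}
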
diff --git a/internal/sql/repository/AppListingRepository.go b/internal/sql/repository/AppListingRepository.go
index f8456932b2..026aed924a 100644
--- a/internal/sql/repository/AppListingRepository.go
+++ b/internal/sql/repository/AppListingRepository.go
@@ -20,17 +20,11 @@
 package repository
 
 import (
-	"context"
 	"encoding/json"
 	"fmt"
 	"github.com/devtron-labs/devtron/api/bean/AppView"
-	"github.com/devtron-labs/devtron/api/bean/gitOps"
 	"github.com/devtron-labs/devtron/internal/middleware"
-	appWorkflow2 "github.com/devtron-labs/devtron/internal/sql/repository/appWorkflow"
-	"github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig"
-	"github.com/devtron-labs/devtron/internal/util"
 	repository2 "github.com/devtron-labs/devtron/pkg/cluster/environment/repository"
-	"go.opentelemetry.io/otel"
 	"strings"
 	"time"
@@ -43,10 +37,11 @@ type AppListingRepository interface {
 	FetchJobs(appIds []int, statuses []string, environmentIds []int, sortOrder string) ([]*AppView.JobListingContainer, error)
 	FetchOverviewCiPipelines(jobId int) ([]*AppView.JobListingContainer, error)
 	FetchJobsLastSucceededOn(ciPipelineIDs []int) ([]*AppView.CiPipelineLastSucceededTime, error)
-	FetchAppDetail(ctx context.Context, appId int, envId int) (AppView.AppDetailContainer, error)
-	FetchAppTriggerView(appId int) ([]AppView.TriggerView, error)
-	FetchAppStageStatus(appId int, appType int) ([]AppView.AppStageStatus, error)
+
+	FetchAppStage(appId int, appType int) (AppView.AppStages, error)
+	GetEnvironmentNameFromPipelineId(pipelineId int) (string, error)
+	GetDeploymentDetailsByAppIdAndEnvId(appId int, envId int) (AppView.DeploymentDetailContainer, error)
 
 	// Not in used
 	PrometheusApiByEnvId(id int) (*string, error)
@@ -88,9 +83,6 @@ type AppListingRepositoryImpl struct {
 	Logger                           *zap.SugaredLogger
 	appListingRepositoryQueryBuilder helper.AppListingRepositoryQueryBuilder
 	environmentRepository            repository2.EnvironmentRepository
-	gitOpsRepository                 GitOpsConfigRepository
-	appWorkflowRepository            appWorkflow2.AppWorkflowRepository
-	deploymentConfigRepository       deploymentConfig.Repository
 }
 
@@ -98,20 +90,16 @@ func NewAppListingRepositoryImpl(
 	dbConnection *pg.DB,
 	appListingRepositoryQueryBuilder helper.AppListingRepositoryQueryBuilder,
 	environmentRepository repository2.EnvironmentRepository,
-	gitOpsRepository GitOpsConfigRepository,
 	appWorkflowRepository appWorkflow2.AppWorkflowRepository,
-	deploymentConfigRepository deploymentConfig.Repository) *AppListingRepositoryImpl {
+) *AppListingRepositoryImpl {
 	return &AppListingRepositoryImpl{
 		dbConnection:                     dbConnection,
 		Logger:                           Logger,
 		appListingRepositoryQueryBuilder: appListingRepositoryQueryBuilder,
 		environmentRepository:            environmentRepository,
-		gitOpsRepository:                 gitOpsRepository,
-		appWorkflowRepository:            appWorkflowRepository,
-		deploymentConfigRepository:       deploymentConfigRepository,
 	}
 }
 
-func (impl AppListingRepositoryImpl) FetchJobs(appIds []int, statuses []string, environmentIds []int, sortOrder string) ([]*AppView.JobListingContainer, error) {
+func (impl *AppListingRepositoryImpl) FetchJobs(appIds []int, statuses []string, environmentIds []int, sortOrder string) ([]*AppView.JobListingContainer, error) {
 	var jobContainers []*AppView.JobListingContainer
 	if len(appIds) == 0 {
 		return jobContainers, nil
@@ -132,7 +120,7 @@ func (impl AppListingRepositoryImpl) FetchJobs(appIds []int, statuses []string,
 	return jobContainers, nil
 }
 
-func (impl AppListingRepositoryImpl) FetchOverviewCiPipelines(jobId int) ([]*AppView.JobListingContainer, error) {
+func (impl *AppListingRepositoryImpl) FetchOverviewCiPipelines(jobId int) ([]*AppView.JobListingContainer, error) {
 	var jobContainers []*AppView.JobListingContainer
 	jobsQuery := impl.appListingRepositoryQueryBuilder.OverviewCiPipelineQuery()
 	impl.Logger.Debugw("basic app detail query: ", jobsQuery)
@@ -149,7 +137,7 @@ func (impl AppListingRepositoryImpl) FetchOverviewCiPipelines(jobId int) ([]*App
 	return jobContainers, nil
 }
 
-func (impl AppListingRepositoryImpl) FetchOverviewAppsByEnvironment(envId, limit, offset int) ([]*AppView.AppEnvironmentContainer, error) {
+func (impl *AppListingRepositoryImpl) FetchOverviewAppsByEnvironment(envId, limit, offset int) ([]*AppView.AppEnvironmentContainer, error) {
 	query := ` SELECT a.id as app_id,a.app_name,aps.status as app_status, ld.last_deployed_time, p.id as pipeline_id
 	FROM app a
 	INNER JOIN pipeline p ON p.app_id = a.id and p.deleted = false and p.environment_id = ?
@@ -173,7 +161,7 @@ func (impl AppListingRepositoryImpl) FetchOverviewAppsByEnvironment(envId, limit
 	return envContainers, err
 }
 
-func (impl AppListingRepositoryImpl) FetchLastDeployedImage(appId, envId int) (*LastDeployed, error) {
+func (impl *AppListingRepositoryImpl) FetchLastDeployedImage(appId, envId int) (*LastDeployed, error) {
 	var lastDeployed []*LastDeployed
 	// we are adding a case in the query to concatenate the string "(inactive)" to the users' email id when user is inactive
 	query := `select ca.id as ci_artifact_id,ca.image as last_deployed_image,
@@ -194,7 +182,7 @@ func (impl AppListingRepositoryImpl) FetchLastDeployedImage(appId, envId int) (*
 	return nil, err
 }
 
-func (impl AppListingRepositoryImpl) FetchJobsLastSucceededOn(CiPipelineIDs []int) ([]*AppView.CiPipelineLastSucceededTime, error) {
+func (impl *AppListingRepositoryImpl) FetchJobsLastSucceededOn(CiPipelineIDs []int) ([]*AppView.CiPipelineLastSucceededTime, error) {
 	var lastSucceededTimeArray []*AppView.CiPipelineLastSucceededTime
 	if len(CiPipelineIDs) == 0 {
 		return lastSucceededTimeArray, nil
@@ -209,19 +197,7 @@ func (impl AppListingRepositoryImpl) FetchJobsLastSucceededOn(CiPipelineIDs []in
 	return lastSucceededTimeArray, nil
 }
 
-func getRequiredAppIdsInSequence(appIds []int) []int {
-	resIDs := make([]int, 0)
-	appIdsSet := make(map[int]bool)
-	for _, appId := range appIds {
-		if _, ok := appIdsSet[appId]; !ok {
-			resIDs = append(resIDs, appId)
-			appIdsSet[appId] = true
-		}
-	}
-	return resIDs
-}
-
-func (impl AppListingRepositoryImpl) FetchAppsByEnvironmentV2(appListingFilter helper.AppListingFilter) ([]*AppView.AppEnvironmentContainer, int, error) {
+func (impl *AppListingRepositoryImpl) FetchAppsByEnvironmentV2(appListingFilter helper.AppListingFilter) ([]*AppView.AppEnvironmentContainer, int, error) {
 	impl.Logger.Debugw("reached at FetchAppsByEnvironment ", "appListingFilter", appListingFilter)
 	var appEnvArr []*AppView.AppEnvironmentContainer
 	appsSize := 0
@@ -339,74 +315,6 @@ func (impl AppListingRepositoryImpl) FetchAppsByEnvironmentV2(appListingFilter h
 	return appEnvArr, appsSize, nil
 }
 
-func (impl AppListingRepositoryImpl) getEnvironmentNameFromPipelineId(pipelineID int) (string, error) {
-	var environmentName string
-	query := "SELECT e.environment_name " +
-		"FROM pipeline p " +
-		"JOIN environment e ON p.environment_id = e.id WHERE p.id = ?"
-
-	_, err := impl.dbConnection.Query(&environmentName, query, pipelineID)
-	if err != nil {
-		impl.Logger.Errorw("error in finding environment", "err", err, "pipelineID", pipelineID)
-		return "", err
-	}
-	return environmentName, nil
-}
-
-// DeploymentDetailsByAppIdAndEnvId It will return the deployment detail of any cd pipeline which is latest triggered for Environment of any App
-func (impl AppListingRepositoryImpl) deploymentDetailsByAppIdAndEnvId(ctx context.Context, appId int, envId int) (AppView.DeploymentDetailContainer, error) {
-	_, span := otel.Tracer("orchestrator").Start(ctx, "DeploymentDetailsByAppIdAndEnvId")
-	defer span.End()
-	var deploymentDetail AppView.DeploymentDetailContainer
-	query := "SELECT" +
-		" a.app_name," +
-		" env.environment_name," +
-		" env.namespace," +
-		" env.default," +
-		" p.deployment_app_type," +
-		" p.ci_pipeline_id," +
-		" p.deployment_app_delete_request," +
-		" pco.id as pco_id," +
-		" cia.data_source," +
-		" cia.id as ci_artifact_id," +
-		" cia.parent_ci_artifact as parent_artifact_id," +
-		" cl.k8s_version," +
-		" env.cluster_id," +
-		" env.is_virtual_environment," +
-		" cl.cluster_name," +
-		" p.id as cd_pipeline_id," +
-		" p.ci_pipeline_id," +
-		" p.trigger_type" +
-		" FROM pipeline p" +
-		" LEFT JOIN pipeline_config_override pco on pco.pipeline_id=p.id" +
-		" INNER JOIN environment env ON env.id=p.environment_id" +
-		" INNER JOIN cluster cl on cl.id=env.cluster_id" +
-		" LEFT JOIN ci_artifact cia on cia.id = pco.ci_artifact_id" +
-		" INNER JOIN app a ON a.id=p.app_id" +
-		" WHERE a.app_type = 0 AND a.id=? AND env.id=? AND p.deleted = FALSE AND env.active = TRUE" +
-		" ORDER BY pco.created_on DESC LIMIT 1;"
-	_, err := impl.dbConnection.Query(&deploymentDetail, query, appId, envId)
-	if err != nil {
-		impl.Logger.Errorw("Exception caught:", "err", err)
-		return deploymentDetail, err
-	}
-	deploymentDetail.EnvironmentId = envId
-
-	dc, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId)
-	if err != nil && err != pg.ErrNoRows {
-		impl.Logger.Errorw("error in getting deployment config by appId and envId", "appId", appId, "envId", envId, "err", err)
-		return deploymentDetail, err
-	}
-	if err == pg.ErrNoRows {
-		deploymentDetail.ReleaseMode = util.PIPELINE_RELEASE_MODE_CREATE
-	} else {
-		deploymentDetail.DeploymentAppType = dc.DeploymentAppType
-		deploymentDetail.ReleaseMode = dc.ReleaseMode
-	}
-
-	return deploymentDetail, nil
-}
-
 func parseMaterialInfo(materialInfo string, source string) (json.RawMessage, error) {
 	defer func() {
 		if r := recover(); r != nil {
@@ -462,29 +370,7 @@ func parseMaterialInfo(materialInfo string, source string) (json.RawMessage, err
 	return mInfo, err
 }
 
-func (impl AppListingRepositoryImpl) FetchAppDetail(ctx context.Context, appId int, envId int) (AppView.AppDetailContainer, error) {
-	impl.Logger.Debugf("reached at AppListingRepository:")
-	var appDetailContainer AppView.AppDetailContainer
-	newCtx, span := otel.Tracer("orchestrator").Start(ctx, "DeploymentDetailsByAppIdAndEnvId")
-	defer span.End()
-	// Fetch deployment detail of cd pipeline latest triggered within env of any App.
-	deploymentDetail, err := impl.deploymentDetailsByAppIdAndEnvId(newCtx, appId, envId)
-	if err != nil {
-		impl.Logger.Warn("unable to fetch deployment detail for app")
-	}
-	if deploymentDetail.PcoId > 0 {
-		deploymentDetail.IsPipelineTriggered = true
-	}
-	appWfMapping, _ := impl.appWorkflowRepository.FindWFCDMappingByCDPipelineId(deploymentDetail.CdPipelineId)
-	if appWfMapping.ParentType == appWorkflow2.CDPIPELINE {
-		parentEnvironmentName, _ := impl.getEnvironmentNameFromPipelineId(appWfMapping.ParentId)
-		deploymentDetail.ParentEnvironmentName = parentEnvironmentName
-	}
-	appDetailContainer.DeploymentDetailContainer = deploymentDetail
-	return appDetailContainer, nil
-}
-
-func (impl AppListingRepositoryImpl) PrometheusApiByEnvId(id int) (*string, error) {
+func (impl *AppListingRepositoryImpl) PrometheusApiByEnvId(id int) (*string, error) {
 	impl.Logger.Debug("reached at PrometheusApiByEnvId:")
 	var prometheusEndpoint string
 	query := "SELECT env.prometheus_endpoint from environment env" +
@@ -499,7 +385,7 @@ func (impl AppListingRepositoryImpl) PrometheusApiByEnvId(id int) (*string, erro
 	return &prometheusEndpoint, nil
 }
 
-func (impl AppListingRepositoryImpl) FetchAppTriggerView(appId int) ([]AppView.TriggerView, error) {
+func (impl *AppListingRepositoryImpl) FetchAppTriggerView(appId int) ([]AppView.TriggerView, error) {
 	impl.Logger.Debug("reached at FetchAppTriggerView:")
 	var triggerView []AppView.TriggerView
 	var triggerViewResponse []AppView.TriggerView
@@ -566,101 +452,7 @@ func (impl AppListingRepositoryImpl) FetchAppTriggerView(appId int) ([]AppView.T
 	return triggerViewResponse, nil
 }
 
-func (impl AppListingRepositoryImpl) FetchAppStageStatus(appId int, appType int) ([]AppView.AppStageStatus, error) {
-	impl.Logger.Debug("reached at AppListingRepository:")
-	var appStageStatus []AppView.AppStageStatus
-
-	var stages struct {
-		AppId                   int    `json:"app_id,omitempty"`
-		CiTemplateId            int    `json:"ci_template_id,omitempty"`
-		CiPipelineId            int    `json:"ci_pipeline_id,omitempty"`
-		ChartId                 int    `json:"chart_id,omitempty"`
-		ChartGitRepoUrl         string `json:"chart_git_repo_url,omitempty"`
-		PipelineId              int    `json:"pipeline_id,omitempty"`
-		YamlStatus              int    `json:"yaml_status,omitempty"`
-		YamlReviewed            bool   `json:"yaml_reviewed,omitempty"`
-		DeploymentConfigRepoURL string `json:"deployment_config_repo_url"`
-	}
-
-	query := "SELECT " +
-		" app.id as app_id, ct.id as ci_template_id, cp.id as ci_pipeline_id, ch.id as chart_id, ch.git_repo_url as chart_git_repo_url, dc.repo_url as deployment_config_repo_url, " +
-		" p.id as pipeline_id, ceco.status as yaml_status, ceco.reviewed as yaml_reviewed " +
-		" FROM app app" +
-		" LEFT JOIN ci_template ct on ct.app_id=app.id" +
-		" LEFT JOIN ci_pipeline cp on cp.app_id=app.id" +
-		" LEFT JOIN charts ch on ch.app_id=app.id" +
-		" LEFT JOIN deployment_config dc on dc.app_id=app.id" +
-		" LEFT JOIN pipeline p on p.app_id=app.id" +
-		" LEFT JOIN chart_env_config_override ceco on ceco.chart_id=ch.id" +
-		" WHERE app.id=? and app.app_type = ? limit 1;"
-
-	impl.Logger.Debugw("last app stages status query:", "query", query)
-
-	_, err := impl.dbConnection.Query(&stages, query, appId, appType)
-	if err != nil {
-		impl.Logger.Errorw("error:", err)
-		return appStageStatus, err
-	}
-
-	var isMaterialExists bool
-	materialQuery := "select exists(select 1 from git_material gm where gm.app_id=? and gm.active is TRUE)"
-	impl.Logger.Debugw("material stage status query:", "query", query)
-
-	_, err = impl.dbConnection.Query(&isMaterialExists, materialQuery, appId)
-	if err != nil {
-		impl.Logger.Errorw("error:", err)
-		return appStageStatus, err
-	}
-	materialExists := 0
-	if isMaterialExists {
-		materialExists = 1
-	}
-	isCustomGitopsRepoUrl := false
-	model, err := impl.gitOpsRepository.GetGitOpsConfigActive()
-	if err != nil && err != pg.ErrNoRows {
-		impl.Logger.Errorw("error while getting GetGitOpsConfigActive", "err", err)
-		return appStageStatus, err
-	}
-	if model != nil && model.Id > 0 && model.AllowCustomRepository {
-		isCustomGitopsRepoUrl = true
-	}
-	if (gitOps.IsGitOpsRepoNotConfigured(stages.ChartGitRepoUrl) && gitOps.IsGitOpsRepoNotConfigured(stages.DeploymentConfigRepoURL)) && stages.CiPipelineId == 0 {
-		stages.ChartGitRepoUrl = ""
-		stages.DeploymentConfigRepoURL = ""
-	}
-	appStageStatus = append(appStageStatus, impl.makeAppStageStatus(0, "APP", stages.AppId, true),
-		impl.makeAppStageStatus(1, "MATERIAL", materialExists, true),
-		impl.makeAppStageStatus(2, "TEMPLATE", stages.CiTemplateId, true),
-		impl.makeAppStageStatus(3, "CI_PIPELINE", stages.CiPipelineId, true),
-		impl.makeAppStageStatus(4, "CHART", stages.ChartId, true),
-		impl.makeAppStageStatus(5, "GITOPS_CONFIG", len(stages.ChartGitRepoUrl)+len(stages.DeploymentConfigRepoURL), isCustomGitopsRepoUrl),
-		impl.makeAppStageStatus(6, "CD_PIPELINE", stages.PipelineId, true),
-		impl.makeAppStageChartEnvConfigStatus(7, "CHART_ENV_CONFIG", stages.YamlStatus == 3 && stages.YamlReviewed),
-	)
-
-	return appStageStatus, nil
-}
-
-func (impl AppListingRepositoryImpl) makeAppStageChartEnvConfigStatus(stage int, stageName string, status bool) AppView.AppStageStatus {
-	return AppView.AppStageStatus{Stage: stage, StageName: stageName, Status: status, Required: true}
-}
-
-func (impl AppListingRepositoryImpl) makeAppStageStatus(stage int, stageName string, id int, isRequired bool) AppView.AppStageStatus {
-	return AppView.AppStageStatus{
-		Stage:     stage,
-		StageName: stageName,
-		Status: func() bool {
-			if id > 0 {
-				return true
-			} else {
-				return false
-			}
-		}(),
-		Required: isRequired,
-	}
-}
-
-func (impl AppListingRepositoryImpl) FetchOtherEnvironment(appId int) ([]*AppView.Environment, error) {
+func (impl *AppListingRepositoryImpl) FetchOtherEnvironment(appId int) ([]*AppView.Environment, error) {
 	// other environment tab
 	var otherEnvironments []*AppView.Environment
 	//TODO: remove infra metrics from query as it is not being used from here
@@ -689,7 +481,7 @@ func (impl AppListingRepositoryImpl) FetchOtherEnvironment(appId int) ([]*AppVie
 	return otherEnvironments, nil
 }
 
-func (impl AppListingRepositoryImpl) FetchMinDetailOtherEnvironment(appId int) ([]*AppView.Environment, error) {
+func (impl *AppListingRepositoryImpl) FetchMinDetailOtherEnvironment(appId int) ([]*AppView.Environment, error) {
 	impl.Logger.Debug("reached at FetchMinDetailOtherEnvironment:")
 	var otherEnvironments []*AppView.Environment
 	//TODO: remove infra metrics from query as it is not being used from here
@@ -707,7 +499,7 @@ func (impl AppListingRepositoryImpl) FetchMinDetailOtherEnvironment(appId int) (
 	return otherEnvironments, nil
 }
 
-func (impl AppListingRepositoryImpl) DeploymentDetailByArtifactId(ciArtifactId int, envId int) (AppView.DeploymentDetailContainer, error) {
+func (impl *AppListingRepositoryImpl) DeploymentDetailByArtifactId(ciArtifactId int, envId int) (AppView.DeploymentDetailContainer, error) {
 	impl.Logger.Debug("reached at AppListingRepository:")
 	var deploymentDetail AppView.DeploymentDetailContainer
 	query := "SELECT env.id AS environment_id, env.environment_name, env.default, pco.created_on as last_deployed_time, pco.updated_by as last_deployed_by_id, a.app_name" +
@@ -728,7 +520,7 @@ func (impl AppListingRepositoryImpl) DeploymentDetailByArtifactId(ciArtifactId i
 	return deploymentDetail, nil
 }
 
-func (impl AppListingRepositoryImpl) FindAppCount(isProd bool) (int, error) {
+func (impl *AppListingRepositoryImpl) FindAppCount(isProd bool) (int, error) {
 	var count int
 	query := "SELECT count(distinct pipeline.app_id) from pipeline pipeline " +
 		" INNER JOIN environment env on env.id=pipeline.environment_id" +
@@ -743,7 +535,95 @@ func (impl AppListingRepositoryImpl) FindAppCount(isProd bool) (int, error) {
 	return count, nil
 }
 
-func (impl AppListingRepositoryImpl) extractEnvironmentNameFromId(jobContainers []*AppView.JobListingContainer) ([]*AppView.JobListingContainer, error) {
+func (impl *AppListingRepositoryImpl) FetchAppStage(appId int, appType int) (AppView.AppStages, error) {
+	var stages AppView.AppStages
+	// FIXME: active condition checks are not there in the query
+	// chart_env_config_override also has an is_override field which is not checked in the query (could be subjective for this particular use case); needs review
+	query := "SELECT " +
+		" app.id as app_id, ct.id as ci_template_id, cp.id as ci_pipeline_id, ch.id as chart_id, ch.git_repo_url as chart_git_repo_url, " +
+		" p.id as pipeline_id, ceco.status as yaml_status, ceco.reviewed as yaml_reviewed " +
+		" FROM app app" +
+		" LEFT JOIN ci_template ct on ct.app_id=app.id" +
+		" LEFT JOIN ci_pipeline cp on cp.app_id=app.id" +
+		" LEFT JOIN charts ch on ch.app_id=app.id" +
+		" LEFT JOIN pipeline p on p.app_id=app.id" +
+		" LEFT JOIN chart_env_config_override ceco on ceco.chart_id=ch.id" +
+		" WHERE app.id=? and app.app_type = ? limit 1;"
+
+	impl.Logger.Debugw("last app stages status query:", "query", query)
+
+	_, err := impl.dbConnection.Query(&stages, query, appId, appType)
+	if err != nil {
+		impl.Logger.Errorw("error in fetching app stages", "err", err)
+		return stages, err
+	}
+	var isMaterialExists bool
+	materialQuery := "select exists(select 1 from git_material gm where gm.app_id=? and gm.active is TRUE)"
+	impl.Logger.Debugw("material stage status query:", "query", materialQuery)
+
+	_, err = impl.dbConnection.Query(&isMaterialExists, materialQuery, appId)
+	if err != nil {
+		impl.Logger.Errorw("error in fetching git material", "err", err)
+		return stages, err
+	}
+	stages.GitMaterialExists = 0
+	if isMaterialExists {
+		stages.GitMaterialExists = 1
+	}
+	return stages, nil
+}
+
+func (impl *AppListingRepositoryImpl) GetEnvironmentNameFromPipelineId(pipelineId int) (string, error) {
+	var environmentName string
+	query := "SELECT e.environment_name " +
+		"FROM pipeline p " +
+		"JOIN environment e ON p.environment_id = e.id WHERE p.id = ?"
+
+	_, err := impl.dbConnection.Query(&environmentName, query, pipelineId)
+	if err != nil {
+		impl.Logger.Errorw("error in finding environment", "err", err, "pipelineID", pipelineId)
+		return "", err
+	}
+	return environmentName, nil
+}
+
+func (impl *AppListingRepositoryImpl) GetDeploymentDetailsByAppIdAndEnvId(appId int, envId int) (AppView.DeploymentDetailContainer, error) {
+	var deploymentDetail AppView.DeploymentDetailContainer
+	query := "SELECT" +
+		" a.app_name," +
+		" env.environment_name," +
+		" env.namespace," +
+		" env.default," +
+		" p.deployment_app_type," +
+		" p.ci_pipeline_id," +
+		" p.deployment_app_delete_request," +
+		" pco.id as pco_id," +
+		" cia.data_source," +
+		" cia.id as ci_artifact_id," +
+		" cia.parent_ci_artifact as parent_artifact_id," +
+		" cl.k8s_version," +
+		" env.cluster_id," +
+		" env.is_virtual_environment," +
+		" cl.cluster_name," +
+		" p.id as cd_pipeline_id," +
+		" p.trigger_type" +
+		" FROM pipeline p" +
+		" LEFT JOIN pipeline_config_override pco on pco.pipeline_id=p.id" +
+		" INNER JOIN environment env ON env.id=p.environment_id" +
+		" INNER JOIN cluster cl on cl.id=env.cluster_id" +
+		" LEFT JOIN ci_artifact cia on cia.id = pco.ci_artifact_id" +
+		" INNER JOIN app a ON a.id=p.app_id" +
+		" WHERE a.app_type = 0 AND a.id=? AND env.id=? AND p.deleted = FALSE AND env.active = TRUE" +
+		" ORDER BY pco.created_on DESC LIMIT 1;"
+	_, err := impl.dbConnection.Query(&deploymentDetail, query, appId, envId)
+	if err != nil {
+		impl.Logger.Errorw("error in fetching deployment details", "error", err, "appId", appId, "envId", envId)
+		return deploymentDetail, err
+	}
+	return deploymentDetail, nil
+}
+
+func (impl *AppListingRepositoryImpl) extractEnvironmentNameFromId(jobContainers []*AppView.JobListingContainer) ([]*AppView.JobListingContainer, error) {
 	var envIds []*int
 	for _, job := range jobContainers {
 		if job.EnvironmentId != 0 {
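
Note: the deleted FetchAppDetail logic now has to be recombined by the service layer that owns app-detail assembly. A rough sketch of that composition, using only fields visible in the removed code; the function name, package, and the assumption that the caller already resolved the parent pipeline id from the workflow mapping are illustrative, not part of this diff:

package example

import (
	"github.com/devtron-labs/devtron/api/bean/AppView"
	"github.com/devtron-labs/devtron/internal/sql/repository"
)

// buildAppDetail recombines the granular queries the way the removed
// FetchAppDetail did; parentPipelineId lookup is assumed to happen upstream.
func buildAppDetail(repo repository.AppListingRepository, appId, envId, parentPipelineId int) (AppView.AppDetailContainer, error) {
	var detail AppView.AppDetailContainer
	deploymentDetail, err := repo.GetDeploymentDetailsByAppIdAndEnvId(appId, envId)
	if err != nil {
		return detail, err
	}
	deploymentDetail.EnvironmentId = envId
	if deploymentDetail.PcoId > 0 {
		// a pipeline_config_override row exists, so this pipeline has been triggered
		deploymentDetail.IsPipelineTriggered = true
	}
	if parentPipelineId > 0 {
		if parentEnvName, nameErr := repo.GetEnvironmentNameFromPipelineId(parentPipelineId); nameErr == nil {
			deploymentDetail.ParentEnvironmentName = parentEnvName
		}
	}
	detail.DeploymentDetailContainer = deploymentDetail
	return detail, nil
}
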
diff --git a/internal/sql/repository/chartConfig/EnvConfigOverrideRepository.go b/internal/sql/repository/chartConfig/EnvConfigOverrideRepository.go
index c6dd5b12ad..cecdd829ec 100644
--- a/internal/sql/repository/chartConfig/EnvConfigOverrideRepository.go
+++ b/internal/sql/repository/chartConfig/EnvConfigOverrideRepository.go
@@ -73,6 +73,7 @@
 	UpdateWithTxn(envConfigOverride *EnvConfigOverride, tx *pg.Tx) (*EnvConfigOverride, error)
 	GetByAppIdEnvIdAndChartRefId(appId, envId int, chartRefId int) (*EnvConfigOverride, error)
+	GetAllOverridesForApp(appId int) ([]EnvConfigOverride, error)
 }
 
 type EnvConfigOverrideRepositoryImpl struct {
@@ -358,3 +359,14 @@ func (r EnvConfigOverrideRepositoryImpl) GetByAppIdEnvIdAndChartRefId(appId, env
 		Select()
 	return eco, err
 }
+
+func (r EnvConfigOverrideRepositoryImpl) GetAllOverridesForApp(appId int) ([]EnvConfigOverride, error) {
+	var eco []EnvConfigOverride
+	err := r.dbConnection.
+		Model(&eco).
+		Column("env_config_override.*", "Chart").
+		Where("env_config_override.active = ?", true).
+		Where("Chart.app_id = ?", appId).
+		Select()
+	return eco, err
+}
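
Note: a small usage sketch for the new GetAllOverridesForApp. The TargetEnvironment field name is assumed from the existing EnvConfigOverride model, and the helper is illustrative only:

package example

import chartConfig "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig"

// listOverriddenEnvIds collects the environment ids that carry an active
// env-level override for the given app, e.g. to audit which environments
// diverge from the base chart values.
func listOverriddenEnvIds(repo chartConfig.EnvConfigOverrideRepository, appId int) ([]int, error) {
	overrides, err := repo.GetAllOverridesForApp(appId)
	if err != nil {
		return nil, err
	}
	envIds := make([]int, 0, len(overrides))
	for _, eco := range overrides {
		envIds = append(envIds, eco.TargetEnvironment)
	}
	return envIds, nil
}
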
diff --git a/internal/sql/repository/deploymentConfig/repository.go b/internal/sql/repository/deploymentConfig/repository.go
index b51286d2f1..f2ec110523 100644
--- a/internal/sql/repository/deploymentConfig/repository.go
+++ b/internal/sql/repository/deploymentConfig/repository.go
@@ -17,6 +17,7 @@
 package deploymentConfig
 
 import (
+	"fmt"
 	"github.com/devtron-labs/devtron/internal/sql/repository/helper"
 	"github.com/devtron-labs/devtron/pkg/sql"
 	"github.com/go-pg/pg"
@@ -34,7 +35,7 @@ type ConfigType string
 
 const (
 	Custom          ConfigType = "custom"
-	SystemGenerated            = "system_generated"
+	SystemGenerated ConfigType = "system_generated"
 )
 
 type DeploymentConfig struct {
@@ -47,6 +48,7 @@ type DeploymentConfig struct {
 	RepoUrl       string `sql:"repo_url"`
 	RepoName      string `sql:"repo_name"`
 	ReleaseMode   string `sql:"release_mode"`
+	ReleaseConfig string `sql:"release_config"`
 	Active        bool   `sql:"active,notnull"`
 	sql.AuditLog
 }
@@ -56,14 +58,13 @@
 	SaveAll(tx *pg.Tx, configs []*DeploymentConfig) ([]*DeploymentConfig, error)
 	Update(tx *pg.Tx, config *DeploymentConfig) (*DeploymentConfig, error)
 	UpdateAll(tx *pg.Tx, config []*DeploymentConfig) ([]*DeploymentConfig, error)
-	GetById(id int) (*DeploymentConfig, error)
 	GetByAppIdAndEnvId(appId, envId int) (*DeploymentConfig, error)
 	GetAppLevelConfigForDevtronApps(appId int) (*DeploymentConfig, error)
-	GetAppLevelConfigByAppIds(appIds []int) ([]*DeploymentConfig, error)
 	GetAppAndEnvLevelConfigsInBulk(appIdToEnvIdsMap map[int][]int) ([]*DeploymentConfig, error)
 	GetByAppIdAndEnvIdEvenIfInactive(appId, envId int) (*DeploymentConfig, error)
-	UpdateRepoUrlByAppIdAndEnvId(repoUrl string, appId, envId int) error
 	GetConfigByAppIds(appIds []int) ([]*DeploymentConfig, error)
+	GetAllConfigsForActiveApps() ([]*DeploymentConfig, error)
+	GetAllEnvLevelConfigsWithReleaseMode(releaseMode string) ([]*DeploymentConfig, error)
 	GetDeploymentAppTypeForChartStoreAppByAppId(appId int) (string, error)
 }
 
@@ -120,19 +121,19 @@ func (impl *RepositoryImpl) UpdateAll(tx *pg.Tx, configs []*DeploymentConfig) ([
 	return configs, err
 }
 
-func (impl *RepositoryImpl) GetById(id int) (*DeploymentConfig, error) {
-	result := &DeploymentConfig{}
-	err := impl.dbConnection.Model(result).Where("id = ?", id).Where("active = ?", true).Select()
-	return result, err
-}
-
 func (impl *RepositoryImpl) GetByAppIdAndEnvId(appId, envId int) (*DeploymentConfig, error) {
 	result := &DeploymentConfig{}
 	err := impl.dbConnection.Model(result).
-		Where("app_id = ?", appId).
-		Where("environment_id = ? ", envId).
-		Where("active = ?", true).
-		Order("id DESC").Limit(1).
+		Join("INNER JOIN app a").
+		JoinOn("deployment_config.app_id = a.id").
+		Join("INNER JOIN environment e").
+		JoinOn("deployment_config.environment_id = e.id").
+		Where("a.active = ?", true).
+		Where("e.active = ?", true).
+		Where("deployment_config.app_id = ?", appId).
+		Where("deployment_config.environment_id = ?", envId).
+		Where("deployment_config.active = ?", true).
+		Order("deployment_config.id DESC").Limit(1).
 		Select()
 	return result, err
 }
@@ -140,31 +141,36 @@
 func (impl *RepositoryImpl) GetAppLevelConfigForDevtronApps(appId int) (*DeploymentConfig, error) {
 	result := &DeploymentConfig{}
 	err := impl.dbConnection.Model(result).
-		Where("app_id = ? ", appId).
-		Where("environment_id is NULL").
-		Where("active = ?", true).
-		Select()
-	return result, err
-}
-
-func (impl *RepositoryImpl) GetAppLevelConfigByAppIds(appIds []int) ([]*DeploymentConfig, error) {
-	var result []*DeploymentConfig
-	err := impl.dbConnection.Model(&result).
-		Where("app_id in (?) and environment_id is NULL ", pg.In(appIds)).
-		Where("active = ?", true).
+		Join("INNER JOIN app a").
+		JoinOn("deployment_config.app_id = a.id").
+		Where("a.active = ?", true).
+		Where("deployment_config.app_id = ? ", appId).
+		Where("deployment_config.environment_id is NULL").
+		Where("deployment_config.active = ?", true).
 		Select()
 	return result, err
 }
 
 func (impl *RepositoryImpl) GetAppAndEnvLevelConfigsInBulk(appIdToEnvIdsMap map[int][]int) ([]*DeploymentConfig, error) {
 	var result []*DeploymentConfig
+	if len(appIdToEnvIdsMap) == 0 {
+		return result, nil
+	}
 	err := impl.dbConnection.Model(&result).
+		Join("INNER JOIN app a").
+		JoinOn("deployment_config.app_id = a.id").
+		Join("INNER JOIN environment e").
+		JoinOn("deployment_config.environment_id = e.id").
+		Where("a.active = ?", true).
+		Where("e.active = ?", true).
 		WhereOrGroup(func(query *orm.Query) (*orm.Query, error) {
 			for appId, envIds := range appIdToEnvIdsMap {
 				if len(envIds) == 0 {
 					continue
 				}
-				query = query.Where("app_id = ?", appId).Where("environment_id in (?)", pg.In((envIds))).Where("active = ?", true)
+				query = query.Where("deployment_config.app_id = ?", appId).
+					Where("deployment_config.environment_id in (?)", pg.In(envIds)).
+					Where("deployment_config.active = ?", true)
 			}
 			return query, nil
 		}).Select()
@@ -172,40 +178,102 @@
 }
 
 func (impl *RepositoryImpl) GetByAppIdAndEnvIdEvenIfInactive(appId, envId int) (*DeploymentConfig, error) {
+	if envId == 0 {
+		return impl.getByAppIdEvenIfInactive(appId)
+	}
+	return impl.getByAppIdAndEnvIdEvenIfInactive(appId, envId)
+}
+
+func (impl *RepositoryImpl) getByAppIdEvenIfInactive(appId int) (*DeploymentConfig, error) {
 	result := &DeploymentConfig{}
 	err := impl.dbConnection.Model(result).
+		Join("INNER JOIN app a").
+		JoinOn("deployment_config.app_id = a.id").
+		Where("a.active = ?", true).
 		WhereGroup(func(query *orm.Query) (*orm.Query, error) {
-			query = query.Where("app_id = ?", appId)
-			if envId == 0 {
-				query = query.Where("environment_id is NULL")
-			} else {
-				query = query.Where("environment_id = ? ", envId)
-			}
+			query = query.Where("deployment_config.app_id = ?", appId).
+				Where("deployment_config.environment_id is NULL")
 			return query, nil
 		}).
-		Order("id DESC").Limit(1).
+		Order("deployment_config.id DESC").
+		Limit(1).
 		Select()
 	return result, err
 }
 
-func (impl *RepositoryImpl) UpdateRepoUrlByAppIdAndEnvId(repoUrl string, appId, envId int) error {
-	_, err := impl.dbConnection.
-		Model((*DeploymentConfig)(nil)).
-		Set("repo_url = ? ", repoUrl).
-		Where("app_id = ? and environment_id = ? ", appId, envId).
-		Update()
-	return err
+func (impl *RepositoryImpl) getByAppIdAndEnvIdEvenIfInactive(appId, envId int) (*DeploymentConfig, error) {
+	if envId == 0 {
+		return nil, fmt.Errorf("empty envId passed for deployment config")
+	}
+	result := &DeploymentConfig{}
+	err := impl.dbConnection.Model(result).
+		Join("INNER JOIN app a").
+		JoinOn("deployment_config.app_id = a.id").
+		Join("INNER JOIN environment e").
+		JoinOn("deployment_config.environment_id = e.id").
+		Where("a.active = ?", true).
+		Where("e.active = ?", true).
+		WhereGroup(func(query *orm.Query) (*orm.Query, error) {
+			query = query.Where("deployment_config.app_id = ?", appId).
+				Where("deployment_config.environment_id = ?", envId)
+			return query, nil
+		}).
+		Order("deployment_config.id DESC").
+		Limit(1).
+		Select()
+	return result, err
 }
 
 func (impl *RepositoryImpl) GetConfigByAppIds(appIds []int) ([]*DeploymentConfig, error) {
 	var results []*DeploymentConfig
+	if len(appIds) == 0 {
+		return results, nil
+	}
 	err := impl.dbConnection.Model(&results).
-		Where("app_id in (?) ", pg.In(appIds)).
-		Where("active = ?", true).
+		Join("INNER JOIN app a").
+		JoinOn("deployment_config.app_id = a.id").
+		Where("a.active = ?", true).
+		Where("deployment_config.app_id in (?) ", pg.In(appIds)).
+		Where("deployment_config.active = ?", true).
 		Select()
 	return results, err
 }
 
+// GetAllConfigsForActiveApps returns all deployment configs for active apps.
+// The INNER JOIN on app filters out rows belonging to inactive apps.
+// NOTE: earlier we were not deleting deployment configs on app delete, so
+// configs of deleted apps may still be marked active and must be excluded here.
+func (impl *RepositoryImpl) GetAllConfigsForActiveApps() ([]*DeploymentConfig, error) {
+	result := make([]*DeploymentConfig, 0)
+	err := impl.dbConnection.Model(&result).
+		Join("INNER JOIN app a").
+		JoinOn("deployment_config.app_id = a.id").
+		Where("a.active = ?", true).
+		Where("deployment_config.active = ?", true).
+		Select()
+	return result, err
+}
+
+// GetAllEnvLevelConfigsWithReleaseMode returns all env-level deployment configs for active apps and envs.
+// The INNER JOIN on app filters out rows belonging to inactive apps.
+// The INNER JOIN on environment filters out rows belonging to inactive envs.
+// NOTE: earlier we were not deleting deployment configs on app delete, so
+// configs of deleted apps or envs may still be marked active and must be excluded here.
+func (impl *RepositoryImpl) GetAllEnvLevelConfigsWithReleaseMode(releaseMode string) ([]*DeploymentConfig, error) {
+	result := make([]*DeploymentConfig, 0)
+	err := impl.dbConnection.Model(&result).
+		Join("INNER JOIN app a").
+		JoinOn("deployment_config.app_id = a.id").
+		Join("INNER JOIN environment e").
+		JoinOn("deployment_config.environment_id = e.id").
+		Where("a.active = ?", true).
+		Where("e.active = ?", true).
+		Where("deployment_config.active = ?", true).
+		Where("deployment_config.release_mode = ?", releaseMode).
+		Select()
+	return result, err
+}
+
 func (impl *RepositoryImpl) GetDeploymentAppTypeForChartStoreAppByAppId(appId int) (string, error) {
 	result := &DeploymentConfig{}
 	err := impl.dbConnection.Model(result).
diff --git a/internal/sql/repository/pipelineConfig/PipelineRepository.go b/internal/sql/repository/pipelineConfig/PipelineRepository.go
index 14d465f4ed..4747ab2574 100644
--- a/internal/sql/repository/pipelineConfig/PipelineRepository.go
+++ b/internal/sql/repository/pipelineConfig/PipelineRepository.go
@@ -101,6 +101,7 @@ type PipelineRepository interface {
 	GetByEnvOverrideId(envOverrideId int) ([]Pipeline, error)
 	GetByEnvOverrideIdAndEnvId(envOverrideId, envId int) (Pipeline, error)
 	FindActiveByAppIdAndEnvironmentId(appId int, environmentId int) (pipelines []*Pipeline, err error)
+	FindOneByAppIdAndEnvId(appId int, envId int) (*Pipeline, error)
 	UniqueAppEnvironmentPipelines() ([]*Pipeline, error)
 	FindByCiPipelineId(ciPipelineId int) (pipelines []*Pipeline, err error)
 	FindByParentCiPipelineId(ciPipelineId int) (pipelines []*Pipeline, err error)
@@ -130,7 +131,7 @@ type PipelineRepository interface {
 	FindIdsByAppIdsAndEnvironmentIds(appIds, environmentIds []int) (ids []int, err error)
 	FindIdsByProjectIdsAndEnvironmentIds(projectIds, environmentIds []int) ([]int, error)
 
-	GetArgoPipelineByArgoAppName(argoAppName string) (Pipeline, error)
+	GetArgoPipelineByArgoAppName(argoAppName string) ([]Pipeline, error)
 	FindActiveByAppIds(appIds []int) (pipelines []*Pipeline, err error)
 	FindAppAndEnvironmentAndProjectByPipelineIds(pipelineIds []int) (pipelines []*Pipeline, err error)
 	FilterDeploymentDeleteRequestedPipelineIds(cdPipelineIds []int) (map[int]bool, error)
@@ -142,6 +143,7 @@ type PipelineRepository interface {
 	FindDeploymentAppTypeByAppIdAndEnvId(appId, envId int) (string, error)
 	FindByAppIdToEnvIdsMapping(appIdToEnvIds map[int][]int) ([]*Pipeline, error)
 	FindDeploymentAppTypeByIds(ids []int) (pipelines []*Pipeline, err error)
+	GetAllArgoAppInfoByDeploymentAppNames(deploymentAppNames []string) ([]*PipelineDeploymentConfigObj, error)
 }
 
 type CiArtifactDTO struct {
@@ -160,6 +162,12 @@ type DeploymentObject struct {
 	Status string `sql:"status"`
 }
 
+type PipelineDeploymentConfigObj struct {
+	DeploymentAppName string `json:"deployment_app_name"`
+	AppId             int    `json:"app_id"`
+	EnvironmentId     int    `json:"environment_id"`
+}
+
 type PipelineRepositoryImpl struct {
 	dbConnection *pg.DB
 	logger       *zap.SugaredLogger
@@ -169,11 +177,11 @@ func NewPipelineRepositoryImpl(dbConnection *pg.DB, logger *zap.SugaredLogger) *
 	return &PipelineRepositoryImpl{dbConnection: dbConnection, logger: logger}
 }
 
-func (impl PipelineRepositoryImpl) GetConnection() *pg.DB {
+func (impl *PipelineRepositoryImpl) GetConnection() *pg.DB {
 	return impl.dbConnection
 }
 
-func (impl PipelineRepositoryImpl) FindByIdsIn(ids []int) ([]*Pipeline, error) {
+func (impl *PipelineRepositoryImpl) FindByIdsIn(ids []int) ([]*Pipeline, error) {
 	var pipelines []*Pipeline
 	err := impl.dbConnection.Model(&pipelines).
 		Column("pipeline.*", "App", "Environment", "Environment.Cluster").
@@ -189,7 +197,7 @@ func (impl PipelineRepositoryImpl) FindByIdsIn(ids []int) ([]*Pipeline, error) {
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) FindByIdsInAndEnvironment(ids []int, environmentId int) ([]*Pipeline, error) {
+func (impl *PipelineRepositoryImpl) FindByIdsInAndEnvironment(ids []int, environmentId int) ([]*Pipeline, error) {
 	var pipelines []*Pipeline
 	err := impl.dbConnection.Model(&pipelines).
 		Where("id in (?)", pg.In(ids)).
@@ -197,7 +205,8 @@ func (impl PipelineRepositoryImpl) FindByIdsInAndEnvironm
 		Select()
 	return pipelines, err
 }
-func (impl PipelineRepositoryImpl) FindByCiPipelineIdsIn(ciPipelineIds []int) ([]*Pipeline, error) {
+
+func (impl *PipelineRepositoryImpl) FindByCiPipelineIdsIn(ciPipelineIds []int) ([]*Pipeline, error) {
 	var pipelines []*Pipeline
 	err := impl.dbConnection.Model(&pipelines).
 		Where("ci_pipeline_id in (?)", pg.In(ciPipelineIds)).
@@ -205,7 +214,7 @@ func (impl PipelineRepositoryImpl) FindByCiPipelineIdsIn(ciPipelineIds []int) ([
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) Save(pipeline []*Pipeline, tx *pg.Tx) error {
+func (impl *PipelineRepositoryImpl) Save(pipeline []*Pipeline, tx *pg.Tx) error {
 	var v []interface{}
 	for _, i := range pipeline {
 		v = append(v, i)
@@ -214,12 +223,12 @@ func (impl PipelineRepositoryImpl) Save(pipeline []*Pipeline, tx *pg.Tx) error {
 	return err
 }
 
-func (impl PipelineRepositoryImpl) Update(pipeline *Pipeline, tx *pg.Tx) error {
+func (impl *PipelineRepositoryImpl) Update(pipeline *Pipeline, tx *pg.Tx) error {
 	err := tx.Update(pipeline)
 	return err
 }
 
-func (impl PipelineRepositoryImpl) FindAutomaticByCiPipelineId(ciPipelineId int) (pipelines []*Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindAutomaticByCiPipelineId(ciPipelineId int) (pipelines []*Pipeline, err error) {
 	err = impl.dbConnection.Model(&pipelines).
 		Where("ci_pipeline_id =?", ciPipelineId).
 		Where("trigger_type =?", TRIGGER_TYPE_AUTOMATIC).
@@ -233,7 +242,7 @@ func (impl PipelineRepositoryImpl) FindAutomaticByCiPipelineId(ciPipelineId int)
 	return pipelines, nil
 }
 
-func (impl PipelineRepositoryImpl) FindByCiPipelineId(ciPipelineId int) (pipelines []*Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindByCiPipelineId(ciPipelineId int) (pipelines []*Pipeline, err error) {
 	err = impl.dbConnection.Model(&pipelines).
 		Where("ci_pipeline_id =?", ciPipelineId).
 		Where("deleted =?", false).
@@ -245,7 +254,8 @@ func (impl PipelineRepositoryImpl) FindByCiPipelineId(ciPipelineId int) (pipelin
 	}
 	return pipelines, nil
 }
-func (impl PipelineRepositoryImpl) FindByParentCiPipelineId(ciPipelineId int) (pipelines []*Pipeline, err error) {
+
+func (impl *PipelineRepositoryImpl) FindByParentCiPipelineId(ciPipelineId int) (pipelines []*Pipeline, err error) {
 	err = impl.dbConnection.Model(&pipelines).
 		Column("pipeline.*").
 		Join("INNER JOIN app_workflow_mapping awm on awm.component_id = pipeline.id").
@@ -261,7 +271,7 @@ func (impl PipelineRepositoryImpl) FindByParentCiPipelineId(ciPipelineId int) (p
 	return pipelines, nil
 }
 
-func (impl PipelineRepositoryImpl) FindActiveByAppId(appId int) (pipelines []*Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindActiveByAppId(appId int) (pipelines []*Pipeline, err error) {
 	err = impl.dbConnection.Model(&pipelines).
 		Column("pipeline.*", "Environment").
 		Where("app_id = ?", appId).
@@ -270,7 +280,7 @@ func (impl PipelineRepositoryImpl) FindActiveByAppId(appId int) (pipelines []*Pi
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) FindActiveByAppIdAndEnvironmentId(appId int, environmentId int) (pipelines []*Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindActiveByAppIdAndEnvironmentId(appId int, environmentId int) (pipelines []*Pipeline, err error) {
 	err = impl.dbConnection.Model(&pipelines).
 		Column("pipeline.*", "Environment", "App").
 		Where("app_id = ?", appId).
@@ -280,14 +290,25 @@ func (impl PipelineRepositoryImpl) FindActiveByAppIdAndEnvironmentId(appId int,
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) FindActiveByAppIdAndEnvironmentIdV2() (pipelines []*Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindOneByAppIdAndEnvId(appId int, envId int) (*Pipeline, error) {
+	pipeline := Pipeline{}
+	err := impl.dbConnection.Model(&pipeline).
+		Column("pipeline.*").
+		Where("app_id = ?", appId).
+		Where("deleted = ?", false).
+		Where("environment_id = ? ", envId).
+		Select()
+	return &pipeline, err
+}
+
+func (impl *PipelineRepositoryImpl) FindActiveByAppIdAndEnvironmentIdV2() (pipelines []*Pipeline, err error) {
 	err = impl.dbConnection.Model(&pipelines).
 		Where("deleted = ?", false).
 		Select()
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) Delete(id int, userId int32, tx *pg.Tx) error {
+func (impl *PipelineRepositoryImpl) Delete(id int, userId int32, tx *pg.Tx) error {
 	pipeline := &Pipeline{}
 	r, err := tx.Model(pipeline).Set("deleted =?", true).Set("deployment_app_created =?", false).
 		Set("updated_on = ?", time.Now()).Set("updated_by = ?", userId).Where("id =?", id).Update()
@@ -295,7 +316,7 @@ func (impl PipelineRepositoryImpl) Delete(id int, userId int32, tx *pg.Tx) error
 	return err
 }
 
-func (impl PipelineRepositoryImpl) MarkPartiallyDeleted(id int, userId int32, tx *pg.Tx) error {
+func (impl *PipelineRepositoryImpl) MarkPartiallyDeleted(id int, userId int32, tx *pg.Tx) error {
 	pipeline := &Pipeline{}
 	_, err := tx.Model(pipeline).
 		Set("deployment_app_delete_request = ?", true).
@@ -307,7 +328,7 @@ func (impl PipelineRepositoryImpl) MarkPartiallyDeleted(id int, userId int32, tx
 	return err
 }
 
-func (impl PipelineRepositoryImpl) FindByName(pipelineName string) (pipeline *Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindByName(pipelineName string) (pipeline *Pipeline, err error) {
 	pipeline = &Pipeline{}
 	err = impl.dbConnection.Model(pipeline).
 		Where("pipeline_name = ?", pipelineName).
@@ -315,7 +336,7 @@ func (impl PipelineRepositoryImpl) FindByName(pipelineName string) (pipeline *Pi
 	return pipeline, err
 }
 
-func (impl PipelineRepositoryImpl) PipelineExists(pipelineName string) (bool, error) {
+func (impl *PipelineRepositoryImpl) PipelineExists(pipelineName string) (bool, error) {
 	pipeline := &Pipeline{}
 	exists, err := impl.dbConnection.Model(pipeline).
 		Where("pipeline_name = ?", pipelineName).
@@ -324,7 +345,7 @@ func (impl PipelineRepositoryImpl) PipelineExists(pipelineName string) (bool, er
 	return exists, err
 }
 
-func (impl PipelineRepositoryImpl) FindById(id int) (pipeline *Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindById(id int) (pipeline *Pipeline, err error) {
 	pipeline = &Pipeline{}
 	err = impl.dbConnection.
 		Model(pipeline).
@@ -336,7 +357,7 @@ func (impl PipelineRepositoryImpl) FindById(id int) (pipeline *Pipeline, err err
 	return pipeline, err
 }
 
-func (impl PipelineRepositoryImpl) FindByIdEvenIfInactive(id int) (pipeline *Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindByIdEvenIfInactive(id int) (pipeline *Pipeline, err error) {
 	pipeline = &Pipeline{}
 	err = impl.dbConnection.
 		Model(pipeline).
@@ -347,7 +368,7 @@ func (impl PipelineRepositoryImpl) FindByIdEvenIfInactive(id int) (pipeline *Pip
 	return pipeline, err
 }
 
-func (impl PipelineRepositoryImpl) GetPostStageConfigById(id int) (pipeline *Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) GetPostStageConfigById(id int) (pipeline *Pipeline, err error) {
 	pipeline = &Pipeline{}
 	err = impl.dbConnection.
 		Model(pipeline).
@@ -358,7 +379,7 @@ func (impl PipelineRepositoryImpl) GetPostStageConfigById(id int) (pipeline *Pip
 	return pipeline, err
 }
 
-func (impl PipelineRepositoryImpl) FindAppAndEnvDetailsByPipelineId(id int) (pipeline *Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindAppAndEnvDetailsByPipelineId(id int) (pipeline *Pipeline, err error) {
 	pipeline = &Pipeline{}
 	err = impl.dbConnection.
 		Model(pipeline).
@@ -373,7 +394,7 @@ func (impl PipelineRepositoryImpl) FindAppAndEnvDetailsByPipelineId(id int) (pip
 
 // FindActiveByEnvIdAndDeploymentType takes in environment id and current deployment app type
 // and fetches and returns a list of pipelines matching the same excluding given app ids.
-func (impl PipelineRepositoryImpl) FindActiveByEnvIdAndDeploymentType(environmentId int,
+func (impl *PipelineRepositoryImpl) FindActiveByEnvIdAndDeploymentType(environmentId int,
 	deploymentAppType string, exclusionList []int, includeApps []int) ([]*Pipeline, error) {
 
 	// NOTE: PG query throws error with slice of integer
@@ -405,7 +426,7 @@
 }
 
 // Deprecated:
-func (impl PipelineRepositoryImpl) FindByEnvOverrideId(envOverrideId int) (pipeline []Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindByEnvOverrideId(envOverrideId int) (pipeline []Pipeline, err error) {
 	var pipelines []Pipeline
 	err = impl.dbConnection.
 		Model(&pipelines).
@@ -416,7 +437,7 @@ func (impl PipelineRepositoryImpl) FindByEnvOverrideId(envOverrideId int) (pipel
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) GetByEnvOverrideId(envOverrideId int) ([]Pipeline, error) {
+func (impl *PipelineRepositoryImpl) GetByEnvOverrideId(envOverrideId int) ([]Pipeline, error) {
 	var pipelines []Pipeline
 	query := "" +
 		" SELECT p.*" +
@@ -434,7 +455,7 @@ func (impl PipelineRepositoryImpl) GetByEnvOverrideId(envOverrideId int) ([]Pipe
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) GetByEnvOverrideIdAndEnvId(envOverrideId, envId int) (Pipeline, error) {
+func (impl *PipelineRepositoryImpl) GetByEnvOverrideIdAndEnvId(envOverrideId, envId int) (Pipeline, error) {
 	var pipeline Pipeline
	query := "" +
 		" SELECT p.*" +
@@ -452,7 +473,7 @@ func (impl PipelineRepositoryImpl) GetByEnvOverrideIdAndEnvId(envOverrideId, env
 	return pipeline, err
 }
 
-func (impl PipelineRepositoryImpl) UniqueAppEnvironmentPipelines() ([]*Pipeline, error) {
+func (impl *PipelineRepositoryImpl) UniqueAppEnvironmentPipelines() ([]*Pipeline, error) {
 	var pipelines []*Pipeline
 
 	err := impl.dbConnection.
@@ -466,7 +487,7 @@ func (impl PipelineRepositoryImpl) UniqueAppEnvironmentPipelines() ([]*Pipeline,
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) FindByPipelineTriggerGitHash(gitHash string) (pipeline *Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindByPipelineTriggerGitHash(gitHash string) (pipeline *Pipeline, err error) {
 	var pipelines *Pipeline
 	err = impl.dbConnection.
 		Model(&pipelines).
@@ -477,19 +498,21 @@ func (impl PipelineRepositoryImpl) FindByPipelineTriggerGitHash(gitHash string)
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) FindAllPipelineCreatedCountInLast24Hour() (pipelineCount int, err error) {
+func (impl *PipelineRepositoryImpl) FindAllPipelineCreatedCountInLast24Hour() (pipelineCount int, err error) {
 	pipelineCount, err = impl.dbConnection.Model(&Pipeline{}).
 		Where("created_on > ?", time.Now().AddDate(0, 0, -1)).
 		Count()
 	return pipelineCount, err
 }
-func (impl PipelineRepositoryImpl) FindAllDeletedPipelineCountInLast24Hour() (pipelineCount int, err error) {
+
+func (impl *PipelineRepositoryImpl) FindAllDeletedPipelineCountInLast24Hour() (pipelineCount int, err error) {
 	pipelineCount, err = impl.dbConnection.Model(&Pipeline{}).
 		Where("created_on > ? and deleted=?", time.Now().AddDate(0, 0, -1), true).
 		Count()
 	return pipelineCount, err
 }
-func (impl PipelineRepositoryImpl) FindActiveByEnvId(envId int) (pipelines []*Pipeline, err error) {
+
+func (impl *PipelineRepositoryImpl) FindActiveByEnvId(envId int) (pipelines []*Pipeline, err error) {
 	err = impl.dbConnection.Model(&pipelines).Column("pipeline.*", "App", "Environment").
 		Where("environment_id = ?", envId).
 		Where("deleted = ?", false).
@@ -497,7 +520,7 @@ func (impl PipelineRepositoryImpl) FindActiveByEnvId(envId int) (pipelines []*Pi
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) FindActivePipelineAppIdsByEnvId(envId int) ([]int, error) {
+func (impl *PipelineRepositoryImpl) FindActivePipelineAppIdsByEnvId(envId int) ([]int, error) {
 	var appIds []int
 	err := impl.dbConnection.Model((*Pipeline)(nil)).Column("app_id").
 		Where("environment_id = ?", envId).
@@ -506,7 +529,7 @@ func (impl PipelineRepositoryImpl) FindActivePipelineAppIdsByEnvId(envId int) ([
 	return appIds, err
 }
 
-func (impl PipelineRepositoryImpl) FindActivePipelineByEnvId(envId int) (pipelines []*Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindActivePipelineByEnvId(envId int) (pipelines []*Pipeline, err error) {
 	err = impl.dbConnection.Model(&pipelines).Column("pipeline.*", "App", "Environment").
 		Where("environment_id = ?", envId).
 		Where("deleted = ?", false).
@@ -515,7 +538,7 @@ func (impl PipelineRepositoryImpl) FindActivePipelineByEnvId(envId int) (pipelin
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) FindActiveByEnvIds(envIds []int) (pipelines []*Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindActiveByEnvIds(envIds []int) (pipelines []*Pipeline, err error) {
 	err = impl.dbConnection.Model(&pipelines).Column("pipeline.*").
 		Where("environment_id in (?)", pg.In(envIds)).
 		Where("deleted = ?", false).
@@ -523,7 +546,7 @@ func (impl PipelineRepositoryImpl) FindActiveByEnvIds(envIds []int) (pipelines [
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) FindActiveByInFilter(envId int, appIdIncludes []int) (pipelines []*Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindActiveByInFilter(envId int, appIdIncludes []int) (pipelines []*Pipeline, err error) {
 	err = impl.dbConnection.Model(&pipelines).Column("pipeline.*", "App", "Environment").
 		Where("environment_id = ?", envId).
 		Where("app_id in (?)", pg.In(appIdIncludes)).
@@ -532,7 +555,7 @@ func (impl PipelineRepositoryImpl) FindActiveByInFilter(envId int, appIdIncludes
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) FindActivePipelineAppIdsByInFilter(envId int, appIdIncludes []int) ([]int, error) {
+func (impl *PipelineRepositoryImpl) FindActivePipelineAppIdsByInFilter(envId int, appIdIncludes []int) ([]int, error) {
 	var appIds []int
 	err := impl.dbConnection.Model((*Pipeline)(nil)).Column("app_id").
 		Where("environment_id = ?", envId).
@@ -541,7 +564,7 @@ func (impl PipelineRepositoryImpl) FindActivePipelineAppIdsByInFilter(envId int,
 	return appIds, err
 }
 
-func (impl PipelineRepositoryImpl) FindActiveByNotFilter(envId int, appIdExcludes []int) (pipelines []*Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindActiveByNotFilter(envId int, appIdExcludes []int) (pipelines []*Pipeline, err error) {
 	err = impl.dbConnection.Model(&pipelines).Column("pipeline.*", "App", "Environment").
 		Where("environment_id = ?", envId).
 		Where("app_id not in (?)", pg.In(appIdExcludes)).
@@ -550,7 +573,7 @@ func (impl PipelineRepositoryImpl) FindActiveByNotFilter(envId int, appIdExclude
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) FindAllPipelinesByChartsOverrideAndAppIdAndChartId(hasConfigOverridden bool, appId int, chartId int) (pipelines []*Pipeline, err error) {
+func (impl *PipelineRepositoryImpl) FindAllPipelinesByChartsOverrideAndAppIdAndChartId(hasConfigOverridden bool, appId int, chartId int) (pipelines []*Pipeline, err error) {
 	err = impl.dbConnection.Model(&pipelines).
 		Column("pipeline.*").
 		Join("inner join charts on pipeline.app_id = charts.app_id").
@@ -565,7 +588,7 @@ func (impl PipelineRepositoryImpl) FindAllPipelinesByChartsOverrideAndAppIdAndCh
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) FindActiveByAppIdAndPipelineId(appId int, pipelineId int) ([]*Pipeline, error) {
+func (impl *PipelineRepositoryImpl) FindActiveByAppIdAndPipelineId(appId int, pipelineId int) ([]*Pipeline, error) {
 	var pipelines []*Pipeline
 	err := impl.dbConnection.Model(&pipelines).
 		Where("app_id = ?", appId).
@@ -575,7 +598,7 @@ func (impl PipelineRepositoryImpl) FindActiveByAppIdAndPipelineId(appId int, pip
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) FindActiveByAppIdAndEnvId(appId int, envId int) (*Pipeline, error) {
+func (impl *PipelineRepositoryImpl) FindActiveByAppIdAndEnvId(appId int, envId int) (*Pipeline, error) {
 	var pipeline Pipeline
 	err := impl.dbConnection.Model(&pipeline).
 		Where("app_id = ?", appId).
@@ -585,7 +608,7 @@ func (impl PipelineRepositoryImpl) FindActiveByAppIdAndEnvId(appId int, envId in
 	return &pipeline, err
 }
 
-func (impl PipelineRepositoryImpl) SetDeploymentAppCreatedInPipeline(deploymentAppCreated bool, pipelineId int, userId int32) error {
+func (impl *PipelineRepositoryImpl) SetDeploymentAppCreatedInPipeline(deploymentAppCreated bool, pipelineId int, userId int32) error {
 	query := "update pipeline set deployment_app_created=?, updated_on=?, updated_by=? where id=?;"
 	var pipeline *Pipeline
 	_, err := impl.dbConnection.Query(pipeline, query, deploymentAppCreated, time.Now(), userId, pipelineId)
@@ -594,7 +617,7 @@ func (impl PipelineRepositoryImpl) SetDeploymentAppCreatedInPipeline(deploymentA
 
 // UpdateCdPipelineDeploymentAppInFilter takes in deployment app type and list of cd pipeline ids and
 // updates the deploymentAppType and sets deployment_app_created to false in the table for given ids.
-func (impl PipelineRepositoryImpl) UpdateCdPipelineDeploymentAppInFilter(deploymentAppType string,
+func (impl *PipelineRepositoryImpl) UpdateCdPipelineDeploymentAppInFilter(deploymentAppType string,
 	cdPipelineIdIncludes []int, userId int32, deploymentAppCreated bool, isDeleted bool) error {
 	query := "update pipeline set deployment_app_created = ?, deployment_app_type = ?, " +
 		"updated_by = ?, updated_on = ?, deployment_app_delete_request = ? where id in (?);"
@@ -604,7 +627,7 @@ func (impl PipelineRepositoryImpl) UpdateCdPipelineDeploymentAppInFilter(deploym
 	return err
 }
 
-func (impl PipelineRepositoryImpl) UpdateCdPipelineAfterDeployment(deploymentAppType string,
+func (impl *PipelineRepositoryImpl) UpdateCdPipelineAfterDeployment(deploymentAppType string,
 	cdPipelineIdIncludes []int, userId int32, isDeleted bool) error {
 	query := "update pipeline set deployment_app_type = ?, " +
 		"updated_by = ?, updated_on = ?, deployment_app_delete_request = ? where id in (?);"
@@ -614,7 +637,7 @@ func (impl PipelineRepositoryImpl) UpdateCdPipelineAfterDeployment(deploymentApp
 	return err
 }
 
-func (impl PipelineRepositoryImpl) FindNumberOfAppsWithCdPipeline(appIds []int) (count int, err error) {
+func (impl *PipelineRepositoryImpl) FindNumberOfAppsWithCdPipeline(appIds []int) (count int, err error) {
 	var pipelines []*Pipeline
 	count, err = impl.dbConnection.
 		Model(&pipelines).
@@ -627,7 +650,7 @@ func (impl PipelineRepositoryImpl) FindNumberOfAppsWithCdPipeline(appIds []int)
 	return count, nil
 }
 
-func (impl PipelineRepositoryImpl) GetAppAndEnvDetailsForDeploymentAppTypePipeline(deploymentAppType string, clusterIds []int) ([]*Pipeline, error) {
+func (impl *PipelineRepositoryImpl) GetAppAndEnvDetailsForDeploymentAppTypePipeline(deploymentAppType string, clusterIds []int) ([]*Pipeline, error) {
 	var pipelines []*Pipeline
 	err := impl.dbConnection.
 		Model(&pipelines).
@@ -643,7 +666,7 @@ func (impl PipelineRepositoryImpl) GetAppAndEnvDetailsForDeploymentAppTypePipeli
 	return pipelines, err
 }
 
-func (impl PipelineRepositoryImpl) GetArgoPipelinesHavingTriggersStuckInLastPossibleNonTerminalTimelines(pendingSinceSeconds int, timeForDegradation int) ([]*Pipeline, error) {
+func (impl *PipelineRepositoryImpl) GetArgoPipelinesHavingTriggersStuckInLastPossibleNonTerminalTimelines(pendingSinceSeconds int, timeForDegradation int) ([]*Pipeline, error) {
 	var pipelines []*Pipeline
 	queryString := `select p.* from pipeline p inner join cd_workflow cw on cw.pipeline_id = p.id
 					inner join cd_workflow_runner cwr on cwr.cd_workflow_id=cw.id
@@ -665,7 +688,7 @@ func (impl PipelineRepositoryImpl) GetArgoPipelinesHavingTriggersStuckInLastPoss
 	return pipelines, nil
 }
 
-func (impl PipelineRepositoryImpl) GetArgoPipelinesHavingLatestTriggerStuckInNonTerminalStatuses(getPipelineDeployedBeforeMinutes int, getPipelineDeployedWithinHours int) ([]*Pipeline, error) {
+func (impl *PipelineRepositoryImpl) GetArgoPipelinesHavingLatestTriggerStuckInNonTerminalStatuses(getPipelineDeployedBeforeMinutes int, getPipelineDeployedWithinHours int) ([]*Pipeline, error) {
 	var pipelines []*Pipeline
 	queryString := `select p.id from pipeline p inner join cd_workflow cw on cw.pipeline_id = p.id
 					inner join cd_workflow_runner cwr on cwr.cd_workflow_id=cw.id
@@ -685,7 +708,7 @@ func (impl PipelineRepositoryImpl) GetArgoPipelinesHavingLatestTriggerStuckInNon
 	return pipelines, nil
 }
 
-func (impl PipelineRepositoryImpl) FindIdsByAppIdsAndEnvironmentIds(appIds, environmentIds []int) ([]int, error) {
+func (impl *PipelineRepositoryImpl) FindIdsByAppIdsAndEnvironmentIds(appIds, environmentIds []int) ([]int, error) {
 	var pipelineIds []int
 	query := "select id from pipeline where app_id in (?) and environment_id in (?)
and deleted = ?;" _, err := impl.dbConnection.Query(&pipelineIds, query, pg.In(appIds), pg.In(environmentIds), false) @@ -696,7 +719,7 @@ func (impl PipelineRepositoryImpl) FindIdsByAppIdsAndEnvironmentIds(appIds, envi return pipelineIds, err } -func (impl PipelineRepositoryImpl) FindIdsByProjectIdsAndEnvironmentIds(projectIds, environmentIds []int) ([]int, error) { +func (impl *PipelineRepositoryImpl) FindIdsByProjectIdsAndEnvironmentIds(projectIds, environmentIds []int) ([]int, error) { var pipelineIds []int query := "select p.id from pipeline p inner join app a on a.id=p.app_id where a.team_id in (?) and p.environment_id in (?) and p.deleted = ? and a.active = ?;" _, err := impl.dbConnection.Query(&pipelineIds, query, pg.In(projectIds), pg.In(environmentIds), false, true) @@ -707,8 +730,8 @@ func (impl PipelineRepositoryImpl) FindIdsByProjectIdsAndEnvironmentIds(projectI return pipelineIds, err } -func (impl PipelineRepositoryImpl) GetArgoPipelineByArgoAppName(argoAppName string) (Pipeline, error) { - var pipeline Pipeline +func (impl *PipelineRepositoryImpl) GetArgoPipelineByArgoAppName(argoAppName string) ([]Pipeline, error) { + var pipeline []Pipeline err := impl.dbConnection.Model(&pipeline). Join("LEFT JOIN deployment_config dc on dc.app_id = pipeline.app_id and dc.environment_id=pipeline.environment_id and dc.active=true"). Column("pipeline.*", "Environment"). @@ -723,7 +746,7 @@ func (impl PipelineRepositoryImpl) GetArgoPipelineByArgoAppName(argoAppName stri return pipeline, nil } -func (impl PipelineRepositoryImpl) FindActiveByAppIds(appIds []int) (pipelines []*Pipeline, err error) { +func (impl *PipelineRepositoryImpl) FindActiveByAppIds(appIds []int) (pipelines []*Pipeline, err error) { err = impl.dbConnection.Model(&pipelines). Column("pipeline.*", "App", "Environment"). Where("app_id in(?)", pg.In(appIds)). @@ -732,7 +755,10 @@ func (impl PipelineRepositoryImpl) FindActiveByAppIds(appIds []int) (pipelines [ return pipelines, err } -func (impl PipelineRepositoryImpl) FindAppAndEnvironmentAndProjectByPipelineIds(pipelineIds []int) (pipelines []*Pipeline, err error) { +func (impl *PipelineRepositoryImpl) FindAppAndEnvironmentAndProjectByPipelineIds(pipelineIds []int) (pipelines []*Pipeline, err error) { + if len(pipelineIds) == 0 { + return pipelines, nil + } err = impl.dbConnection.Model(&pipelines).Column("pipeline.*", "App", "Environment", "App.Team"). Where("pipeline.id in(?)", pg.In(pipelineIds)). Where("pipeline.deleted = ?", false). @@ -740,7 +766,7 @@ func (impl PipelineRepositoryImpl) FindAppAndEnvironmentAndProjectByPipelineIds( return pipelines, err } -func (impl PipelineRepositoryImpl) FilterDeploymentDeleteRequestedPipelineIds(cdPipelineIds []int) (map[int]bool, error) { +func (impl *PipelineRepositoryImpl) FilterDeploymentDeleteRequestedPipelineIds(cdPipelineIds []int) (map[int]bool, error) { var pipelineIds []int pipelineIdsMap := make(map[int]bool) query := "select pipeline.id from pipeline where pipeline.id in (?) 
and pipeline.deployment_app_delete_request = ?;" @@ -754,7 +780,7 @@ func (impl PipelineRepositoryImpl) FilterDeploymentDeleteRequestedPipelineIds(cd return pipelineIdsMap, nil } -func (impl PipelineRepositoryImpl) FindDeploymentTypeByPipelineIds(cdPipelineIds []int) (map[int]DeploymentObject, error) { +func (impl *PipelineRepositoryImpl) FindDeploymentTypeByPipelineIds(cdPipelineIds []int) (map[int]DeploymentObject, error) { pipelineIdsMap := make(map[int]DeploymentObject) @@ -785,7 +811,7 @@ func (impl PipelineRepositoryImpl) FindDeploymentTypeByPipelineIds(cdPipelineIds return pipelineIdsMap, nil } -func (impl PipelineRepositoryImpl) UpdateOldCiPipelineIdToNewCiPipelineId(tx *pg.Tx, oldCiPipelineId, newCiPipelineId int) error { +func (impl *PipelineRepositoryImpl) UpdateOldCiPipelineIdToNewCiPipelineId(tx *pg.Tx, oldCiPipelineId, newCiPipelineId int) error { newCiPipId := pointer.Int(newCiPipelineId) if newCiPipelineId == 0 { newCiPipId = nil @@ -796,7 +822,7 @@ func (impl PipelineRepositoryImpl) UpdateOldCiPipelineIdToNewCiPipelineId(tx *pg return err } -func (impl PipelineRepositoryImpl) UpdateCiPipelineId(tx *pg.Tx, pipelineIds []int, ciPipelineId int) error { +func (impl *PipelineRepositoryImpl) UpdateCiPipelineId(tx *pg.Tx, pipelineIds []int, ciPipelineId int) error { if len(pipelineIds) == 0 { return nil } @@ -806,7 +832,7 @@ func (impl PipelineRepositoryImpl) UpdateCiPipelineId(tx *pg.Tx, pipelineIds []i return err } -func (impl PipelineRepositoryImpl) FindWithEnvironmentByCiIds(ctx context.Context, cIPipelineIds []int) ([]*Pipeline, error) { +func (impl *PipelineRepositoryImpl) FindWithEnvironmentByCiIds(ctx context.Context, cIPipelineIds []int) ([]*Pipeline, error) { _, span := otel.Tracer("orchestrator").Start(ctx, "FindWithEnvironmentByCiIds") defer span.End() var cDPipelines []*Pipeline @@ -820,7 +846,7 @@ func (impl PipelineRepositoryImpl) FindWithEnvironmentByCiIds(ctx context.Contex return cDPipelines, nil } -func (impl PipelineRepositoryImpl) FindDeploymentAppTypeByAppIdAndEnvId(appId, envId int) (string, error) { +func (impl *PipelineRepositoryImpl) FindDeploymentAppTypeByAppIdAndEnvId(appId, envId int) (string, error) { var deploymentAppType string err := impl.dbConnection.Model((*Pipeline)(nil)). Column("deployment_app_type"). @@ -829,7 +855,7 @@ func (impl PipelineRepositoryImpl) FindDeploymentAppTypeByAppIdAndEnvId(appId, e return deploymentAppType, err } -func (impl PipelineRepositoryImpl) FindByAppIdToEnvIdsMapping(appIdToEnvIds map[int][]int) ([]*Pipeline, error) { +func (impl *PipelineRepositoryImpl) FindByAppIdToEnvIdsMapping(appIdToEnvIds map[int][]int) ([]*Pipeline, error) { var pipelines []*Pipeline err := impl.dbConnection.Model(&pipelines). WhereGroup(func(query *orm.Query) (*orm.Query, error) { @@ -845,8 +871,42 @@ func (impl PipelineRepositoryImpl) FindByAppIdToEnvIdsMapping(appIdToEnvIds map[ return pipelines, err } -func (impl PipelineRepositoryImpl) FindDeploymentAppTypeByIds(ids []int) (pipelines []*Pipeline, err error) { +func (impl *PipelineRepositoryImpl) FindDeploymentAppTypeByIds(ids []int) (pipelines []*Pipeline, err error) { err = impl.dbConnection.Model(&pipelines).Column("id", "app_id", "env_id", "deployment_app_type"). 
Where("id in (?)", pg.In(ids)).Where("deleted = ?", false).Select() return pipelines, err } + +func (impl *PipelineRepositoryImpl) GetAllArgoAppInfoByDeploymentAppNames(deploymentAppNames []string) ([]*PipelineDeploymentConfigObj, error) { + result := make([]*PipelineDeploymentConfigObj, 0) + if len(deploymentAppNames) == 0 { + return result, nil + } + err := impl.dbConnection.Model(). + Table("pipeline"). + ColumnExpr("pipeline.deployment_app_name AS deployment_app_name"). + ColumnExpr("pipeline.app_id AS app_id"). + ColumnExpr("pipeline.environment_id AS environment_id"). + // inner join with app + Join("INNER JOIN app"). + JoinOn("pipeline.app_id = app.id"). + // inner join with environment + Join("INNER JOIN environment"). + JoinOn("pipeline.environment_id = environment.id"). + // left join with deployment_config + Join("LEFT JOIN deployment_config"). + JoinOn("pipeline.app_id = deployment_config.app_id"). + JoinOn("pipeline.environment_id = deployment_config.environment_id"). + JoinOn("deployment_config.active = ?", true). + // where conditions + Where("pipeline.deployment_app_name in (?)", pg.In(deploymentAppNames)). + Where("pipeline.deleted = ?", false). + Where("app.active = ?", true). + Where("environment.active = ?", true). + WhereGroup(func(query *orm.Query) (*orm.Query, error) { + return query.WhereOr("pipeline.deployment_app_type = ?", util.PIPELINE_DEPLOYMENT_TYPE_ACD). + WhereOr("deployment_config.deployment_app_type = ?", util.PIPELINE_DEPLOYMENT_TYPE_ACD), nil + }). + Select(&result) + return result, err +} diff --git a/internal/util/ChartTemplateService.go b/internal/util/ChartTemplateService.go index 6f15d3a36f..0fe4098b7d 100644 --- a/internal/util/ChartTemplateService.go +++ b/internal/util/ChartTemplateService.go @@ -47,6 +47,7 @@ const ( ) const ( + PIPELINE_RELEASE_MODE_LINK = "link" PIPELINE_RELEASE_MODE_CREATE = "create" ) diff --git a/pkg/app/AppListingService.go b/pkg/app/AppListingService.go index e9167aff94..ad2659551b 100644 --- a/pkg/app/AppListingService.go +++ b/pkg/app/AppListingService.go @@ -19,12 +19,11 @@ package app import ( "context" "fmt" - "github.com/devtron-labs/common-lib/utils/k8s/health" "github.com/devtron-labs/devtron/api/bean/AppView" - argoApplication "github.com/devtron-labs/devtron/client/argocdServer/bean" "github.com/devtron-labs/devtron/internal/middleware" "github.com/devtron-labs/devtron/internal/sql/repository/app" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow/cdWorkflow" + read4 "github.com/devtron-labs/devtron/pkg/app/appDetails/read" userrepository "github.com/devtron-labs/devtron/pkg/auth/user/repository" ciConfig "github.com/devtron-labs/devtron/pkg/build/pipeline/read" chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" @@ -33,7 +32,6 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/read" "github.com/devtron-labs/devtron/pkg/dockerRegistry" "github.com/devtron-labs/devtron/pkg/pipeline/constants" - util2 "github.com/devtron-labs/devtron/util" errors2 "github.com/juju/errors" "go.opentelemetry.io/otel" "golang.org/x/exp/slices" @@ -130,6 +128,7 @@ func (req FetchAppListingRequest) GetNamespaceClusterMapping() (namespaceCluster type AppListingServiceImpl struct { Logger 
*zap.SugaredLogger appRepository app.AppRepository + appDetailsReadService read4.AppDetailsReadService appListingRepository repository.AppListingRepository appListingViewBuilder AppListingViewBuilder pipelineRepository pipelineConfig.PipelineRepository @@ -147,7 +146,9 @@ type AppListingServiceImpl struct { ciPipelineConfigReadService ciConfig.CiPipelineConfigReadService } -func NewAppListingServiceImpl(Logger *zap.SugaredLogger, appListingRepository repository.AppListingRepository, +func NewAppListingServiceImpl(Logger *zap.SugaredLogger, + appListingRepository repository.AppListingRepository, + appDetailsReadService read4.AppDetailsReadService, appRepository app.AppRepository, appListingViewBuilder AppListingViewBuilder, pipelineRepository pipelineConfig.PipelineRepository, linkoutsRepository repository.LinkoutsRepository, cdWorkflowRepository pipelineConfig.CdWorkflowRepository, @@ -157,9 +158,10 @@ func NewAppListingServiceImpl(Logger *zap.SugaredLogger, appListingRepository re deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService, ciArtifactRepository repository.CiArtifactRepository, envConfigOverrideReadService read.EnvConfigOverrideService, ciPipelineConfigReadService ciConfig.CiPipelineConfigReadService) *AppListingServiceImpl { - serviceImpl := &AppListingServiceImpl{ + return &AppListingServiceImpl{ Logger: Logger, appListingRepository: appListingRepository, + appDetailsReadService: appDetailsReadService, appRepository: appRepository, appListingViewBuilder: appListingViewBuilder, pipelineRepository: pipelineRepository, @@ -176,7 +178,6 @@ func NewAppListingServiceImpl(Logger *zap.SugaredLogger, appListingRepository re envConfigOverrideReadService: envConfigOverrideReadService, ciPipelineConfigReadService: ciPipelineConfigReadService, } - return serviceImpl } const AcdInvalidAppErr = "invalid acd app name and env" @@ -562,190 +563,6 @@ func BuildJobListingResponse(jobContainers []*AppView.JobListingContainer, JobsL return result } -func (impl AppListingServiceImpl) fetchACDAppStatus(fetchAppListingRequest FetchAppListingRequest, existingAppEnvContainers []*AppView.AppEnvironmentContainer) (map[string][]*AppView.AppEnvironmentContainer, error) { - appEnvMapping := make(map[string][]*AppView.AppEnvironmentContainer) - var appNames []string - var appIds []int - var pipelineIds []int - for _, env := range existingAppEnvContainers { - appIds = append(appIds, env.AppId) - if env.EnvironmentName == "" { - continue - } - appName := util2.BuildDeployedAppName(env.AppName, env.EnvironmentName) - appNames = append(appNames, appName) - pipelineIds = append(pipelineIds, env.PipelineId) - } - - appEnvPipelinesMap := make(map[string][]*pipelineConfig.Pipeline) - appEnvCdWorkflowMap := make(map[string]*pipelineConfig.CdWorkflow) - appEnvCdWorkflowRunnerMap := make(map[int][]*pipelineConfig.CdWorkflowRunner) - - // get all the active cd pipelines - if len(pipelineIds) > 0 { - pipelinesAll, err := impl.pipelineRepository.FindByIdsIn(pipelineIds) // TODO - OPTIMIZE 1 - if err != nil && !util.IsErrNoRows(err) { - impl.Logger.Errorw("err", err) - return nil, err - } - // here to build a map of pipelines list for each (appId and envId) - for _, p := range pipelinesAll { - key := fmt.Sprintf("%d-%d", p.AppId, p.EnvironmentId) - if _, ok := appEnvPipelinesMap[key]; !ok { - var appEnvPipelines []*pipelineConfig.Pipeline - appEnvPipelines = append(appEnvPipelines, p) - appEnvPipelinesMap[key] = appEnvPipelines - } else { - appEnvPipelines := appEnvPipelinesMap[key] - appEnvPipelines = 
append(appEnvPipelines, p) - appEnvPipelinesMap[key] = appEnvPipelines - } - } - - // from all the active pipeline, get all the cd workflow - cdWorkflowAll, err := impl.cdWorkflowRepository.FindLatestCdWorkflowByPipelineIdV2(pipelineIds) // TODO - OPTIMIZE 2 - if err != nil && !util.IsErrNoRows(err) { - impl.Logger.Error(err) - return nil, err - } - // find and build a map of latest cd workflow for each (appId and envId), single latest CDWF for any of the cd pipelines. - var wfIds []int - for key, v := range appEnvPipelinesMap { - if _, ok := appEnvCdWorkflowMap[key]; !ok { - for _, itemW := range cdWorkflowAll { - for _, itemP := range v { - if itemW.PipelineId == itemP.Id { - // GOT LATEST CD WF, AND PUT INTO MAP - appEnvCdWorkflowMap[key] = itemW - wfIds = append(wfIds, itemW.Id) - } - } - } - // if no cd wf found for appid-envid, add it into map with nil - if _, ok := appEnvCdWorkflowMap[key]; !ok { - appEnvCdWorkflowMap[key] = nil - } - } - } - - // fetch all the cd workflow runner from cdWF ids, - cdWorkflowRunnersAll, err := impl.cdWorkflowRepository.FindWorkflowRunnerByCdWorkflowId(wfIds) // TODO - OPTIMIZE 3 - if err != nil { - impl.Logger.Errorw("error in getting wf", "err", err) - } - // build a map with key cdWF containing cdWFRunner List, which are later put in map for further requirement - for _, item := range cdWorkflowRunnersAll { - if _, ok := appEnvCdWorkflowRunnerMap[item.CdWorkflowId]; !ok { - var cdWorkflowRunners []*pipelineConfig.CdWorkflowRunner - cdWorkflowRunners = append(cdWorkflowRunners, item) - appEnvCdWorkflowRunnerMap[item.CdWorkflowId] = cdWorkflowRunners - } else { - appEnvCdWorkflowRunnerMap[item.CdWorkflowId] = append(appEnvCdWorkflowRunnerMap[item.CdWorkflowId], item) - } - } - } - releaseMap, _ := impl.ISLastReleaseStopTypeV2(pipelineIds) - - for _, env := range existingAppEnvContainers { - appKey := strconv.Itoa(env.AppId) + "_" + env.AppName - if _, ok := appEnvMapping[appKey]; !ok { - var appEnvContainers []*AppView.AppEnvironmentContainer - appEnvMapping[appKey] = appEnvContainers - } - - key := fmt.Sprintf("%d-%d", env.AppId, env.EnvironmentId) - pipelines := appEnvPipelinesMap[key] - if len(pipelines) == 0 { - impl.Logger.Debugw("no pipeline found") - appEnvMapping[appKey] = append(appEnvMapping[appKey], env) - continue - } - - latestTriggeredWf := appEnvCdWorkflowMap[key] - if latestTriggeredWf == nil || latestTriggeredWf.Id == 0 { - appEnvMapping[appKey] = append(appEnvMapping[appKey], env) - continue - } - var pipeline *pipelineConfig.Pipeline - for _, p := range pipelines { - if p.Id == latestTriggeredWf.PipelineId { - pipeline = p - break - } - } - var preCdStageRunner, postCdStageRunner, cdStageRunner *pipelineConfig.CdWorkflowRunner - cdStageRunners := appEnvCdWorkflowRunnerMap[latestTriggeredWf.Id] - for _, runner := range cdStageRunners { - if runner.WorkflowType == bean.CD_WORKFLOW_TYPE_PRE { - preCdStageRunner = runner - } else if runner.WorkflowType == bean.CD_WORKFLOW_TYPE_DEPLOY { - cdStageRunner = runner - } else if runner.WorkflowType == bean.CD_WORKFLOW_TYPE_POST { - postCdStageRunner = runner - } - } - - if latestTriggeredWf.WorkflowStatus == cdWorkflow.WF_STARTED || latestTriggeredWf.WorkflowStatus == cdWorkflow.WF_UNKNOWN { - if pipeline.PreStageConfig != "" { - if preCdStageRunner != nil && preCdStageRunner.Id != 0 { - env.PreStageStatus = &preCdStageRunner.Status - } else { - status := "" - env.PreStageStatus = &status - } - } - if pipeline.PostStageConfig != "" { - if postCdStageRunner != nil && postCdStageRunner.Id != 0 { 
- env.PostStageStatus = &postCdStageRunner.Status - } else { - status := "" - env.PostStageStatus = &status - } - } - if cdStageRunner != nil { - status := cdStageRunner.Status - if status == string(health.HealthStatusHealthy) { - stopType := releaseMap[pipeline.Id] - if stopType { - status = argoApplication.HIBERNATING - env.Status = status - } - } - env.CdStageStatus = &status - - } else { - status := "" - env.CdStageStatus = &status - } - } else { - if pipeline.PreStageConfig != "" { - if preCdStageRunner != nil && preCdStageRunner.Id != 0 { - var status string = latestTriggeredWf.WorkflowStatus.String() - env.PreStageStatus = &status - } else { - status := "" - env.PreStageStatus = &status - } - } - if pipeline.PostStageConfig != "" { - if postCdStageRunner != nil && postCdStageRunner.Id != 0 { - var status string = latestTriggeredWf.WorkflowStatus.String() - env.PostStageStatus = &status - } else { - status := "" - env.PostStageStatus = &status - } - } - var status string = latestTriggeredWf.WorkflowStatus.String() - - env.CdStageStatus = &status - } - - appEnvMapping[appKey] = append(appEnvMapping[appKey], env) - } - return appEnvMapping, nil -} - func (impl AppListingServiceImpl) fetchACDAppStatusV2(fetchAppListingRequest FetchAppListingRequest, existingAppEnvContainers []*AppView.AppEnvironmentContainer) (map[string][]*AppView.AppEnvironmentContainer, error) { appEnvMapping := make(map[string][]*AppView.AppEnvironmentContainer) for _, env := range existingAppEnvContainers { @@ -756,7 +573,7 @@ func (impl AppListingServiceImpl) fetchACDAppStatusV2(fetchAppListingRequest Fet } func (impl AppListingServiceImpl) FetchAppDetails(ctx context.Context, appId int, envId int) (AppView.AppDetailContainer, error) { - appDetailContainer, err := impl.appListingRepository.FetchAppDetail(ctx, appId, envId) + appDetailContainer, err := impl.appDetailsReadService.FetchAppDetail(ctx, appId, envId) if err != nil { impl.Logger.Errorw("error in fetching app detail", "error", err) return AppView.AppDetailContainer{}, err @@ -828,7 +645,7 @@ func (impl AppListingServiceImpl) FetchAppTriggerView(appId int) ([]AppView.Trig } func (impl AppListingServiceImpl) FetchAppStageStatus(appId int, appType int) ([]AppView.AppStageStatus, error) { - appStageStatuses, err := impl.appListingRepository.FetchAppStageStatus(appId, appType) + appStageStatuses, err := impl.appDetailsReadService.FetchAppStageStatus(appId, appType) return appStageStatuses, err } diff --git a/pkg/app/AppService.go b/pkg/app/AppService.go index 077688bb16..5bb33db0c8 100644 --- a/pkg/app/AppService.go +++ b/pkg/app/AppService.go @@ -26,6 +26,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/adapter/cdWorkflow" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/timelineStatus" cdWorkflow2 "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow/cdWorkflow" + internalUtil "github.com/devtron-labs/devtron/internal/util" bean3 "github.com/devtron-labs/devtron/pkg/app/bean" installedAppReader "github.com/devtron-labs/devtron/pkg/appStore/installedApp/read" "github.com/devtron-labs/devtron/pkg/argoApplication/helper" @@ -39,10 +40,7 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/read" bean4 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" 
"github.com/devtron-labs/devtron/pkg/workflow/cd" - "io/ioutil" "net/url" - "path" - "path/filepath" "strconv" "time" @@ -57,7 +55,6 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/app" "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" - . "github.com/devtron-labs/devtron/internal/util" status2 "github.com/devtron-labs/devtron/pkg/app/status" "github.com/devtron-labs/devtron/pkg/appStatus" repository4 "github.com/devtron-labs/devtron/pkg/appStore/installedApp/repository" @@ -66,8 +63,8 @@ import ( "github.com/devtron-labs/devtron/pkg/sql" "github.com/devtron-labs/devtron/pkg/variables" _ "github.com/devtron-labs/devtron/pkg/variables/repository" - util2 "github.com/devtron-labs/devtron/util" - util "github.com/devtron-labs/devtron/util/event" + globalUtil "github.com/devtron-labs/devtron/util" + eventUtil "github.com/devtron-labs/devtron/util/event" "github.com/go-pg/pg" "go.uber.org/zap" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -99,7 +96,7 @@ func GetAppServiceConfig() (*AppServiceConfig, error) { type AppServiceImpl struct { pipelineOverrideRepository chartConfig.PipelineOverrideRepository - mergeUtil *MergeUtil + mergeUtil *internalUtil.MergeUtil logger *zap.SugaredLogger pipelineRepository pipelineConfig.PipelineRepository eventClient client.EventClient @@ -109,7 +106,7 @@ type AppServiceImpl struct { chartRepository chartRepoRepository.ChartRepository cdWorkflowRepository pipelineConfig.CdWorkflowRepository commonService commonService.CommonService - chartTemplateService ChartTemplateService + chartTemplateService internalUtil.ChartTemplateService pipelineStatusTimelineRepository pipelineConfig.PipelineStatusTimelineRepository pipelineStatusTimelineResourcesService status2.PipelineStatusTimelineResourcesService pipelineStatusSyncDetailService status2.PipelineStatusSyncDetailService @@ -133,10 +130,9 @@ type AppService interface { UpdateReleaseStatus(request *bean.ReleaseStatusUpdateRequest) (bool, error) GetConfigMapAndSecretJson(appId int, envId int, pipelineId int) ([]byte, error) UpdateCdWorkflowRunnerByACDObject(app *v1alpha1.Application, cdWfrId int, updateTimedOutStatus bool) error - UpdateDeploymentStatusForGitOpsPipelines(app *v1alpha1.Application, statusTime time.Time, isAppStore bool) (bool, bool, *chartConfig.PipelineOverride, error) + UpdateDeploymentStatusForGitOpsPipelines(app *v1alpha1.Application, applicationClusterId int, statusTime time.Time, isAppStore bool) (bool, bool, *chartConfig.PipelineOverride, error) WriteCDSuccessEvent(appId int, envId int, override *chartConfig.PipelineOverride) - CreateGitOpsRepo(app *app.App, userId int32) (gitopsRepoName string, chartGitAttr *commonBean.ChartGitAttribute, err error) - GetDeployedManifestByPipelineIdAndCDWorkflowId(appId int, envId int, cdWorkflowId int, ctx context.Context) ([]byte, error) + CreateGitOpsRepo(app *app.App, targetRevision string, userId int32) (gitopsRepoName string, chartGitAttr *commonBean.ChartGitAttribute, err error) // TODO: move inside reader service GetActiveCiCdAppsCount() (int, error) @@ -145,7 +141,7 @@ type AppService interface { func NewAppService( pipelineOverrideRepository 
chartConfig.PipelineOverrideRepository, - mergeUtil *MergeUtil, logger *zap.SugaredLogger, + mergeUtil *internalUtil.MergeUtil, logger *zap.SugaredLogger, pipelineRepository pipelineConfig.PipelineRepository, eventClient client.EventClient, eventFactory client.EventFactory, appRepository app.AppRepository, @@ -153,7 +149,7 @@ func NewAppService( chartRepository chartRepoRepository.ChartRepository, cdWorkflowRepository pipelineConfig.CdWorkflowRepository, commonService commonService.CommonService, - chartTemplateService ChartTemplateService, + chartTemplateService internalUtil.ChartTemplateService, cdPipelineStatusTimelineRepo pipelineConfig.PipelineStatusTimelineRepository, pipelineStatusTimelineResourcesService status2.PipelineStatusTimelineResourcesService, pipelineStatusSyncDetailService status2.PipelineStatusSyncDetailService, @@ -247,7 +243,7 @@ func (impl *AppServiceImpl) ComputeAppstatus(appId, envId int, status health2.He return appStatusInternal, nil } -func (impl *AppServiceImpl) UpdateDeploymentStatusForGitOpsPipelines(app *v1alpha1.Application, statusTime time.Time, isAppStore bool) (bool, bool, *chartConfig.PipelineOverride, error) { +func (impl *AppServiceImpl) UpdateDeploymentStatusForGitOpsPipelines(app *v1alpha1.Application, applicationClusterId int, statusTime time.Time, isAppStore bool) (bool, bool, *chartConfig.PipelineOverride, error) { isSucceeded := false isTimelineUpdated := false isTimelineTimedOut := false @@ -261,7 +257,7 @@ func (impl *AppServiceImpl) UpdateDeploymentStatusForGitOpsPipelines(app *v1alph var isValid bool var cdPipeline pipelineConfig.Pipeline var cdWfr pipelineConfig.CdWorkflowRunner - isValid, cdPipeline, cdWfr, pipelineOverride, err = impl.CheckIfPipelineUpdateEventIsValid(app.Name, gitHash) + isValid, cdPipeline, cdWfr, pipelineOverride, err = impl.CheckIfPipelineUpdateEventIsValid(app, applicationClusterId, gitHash) if err != nil { impl.logger.Errorw("service err, CheckIfPipelineUpdateEventIsValid", "err", err) return isSucceeded, isTimelineUpdated, pipelineOverride, err @@ -427,7 +423,7 @@ func (impl *AppServiceImpl) CheckIfPipelineUpdateEventIsValidForAppStore(gitOpsA return isValid, installedAppVersionHistory, appId, envId, nil } } - if util2.IsTerminalRunnerStatus(installedAppVersionHistory.Status) { + if globalUtil.IsTerminalRunnerStatus(installedAppVersionHistory.Status) { // drop event return isValid, installedAppVersionHistory, appId, envId, nil } @@ -441,18 +437,24 @@ func (impl *AppServiceImpl) CheckIfPipelineUpdateEventIsValidForAppStore(gitOpsA return isValid, installedAppVersionHistory, appId, envId, err } -func (impl *AppServiceImpl) CheckIfPipelineUpdateEventIsValid(argoAppName, gitHash string) (bool, pipelineConfig.Pipeline, pipelineConfig.CdWorkflowRunner, *chartConfig.PipelineOverride, error) { +func (impl *AppServiceImpl) CheckIfPipelineUpdateEventIsValid(app *v1alpha1.Application, applicationClusterId int, gitHash string) (bool, pipelineConfig.Pipeline, pipelineConfig.CdWorkflowRunner, *chartConfig.PipelineOverride, error) { isValid := false var err error // var deploymentStatus repository.DeploymentStatus var pipeline pipelineConfig.Pipeline var pipelineOverride *chartConfig.PipelineOverride var cdWfr pipelineConfig.CdWorkflowRunner - pipeline, err = impl.pipelineRepository.GetArgoPipelineByArgoAppName(argoAppName) + argoAppName := app.Name + pipelines, err := impl.pipelineRepository.GetArgoPipelineByArgoAppName(argoAppName) if err != nil { impl.logger.Errorw("error in getting cd pipeline by argoAppName", "err", err, 
"argoAppName", argoAppName) return isValid, pipeline, cdWfr, pipelineOverride, err } + pipeline, err = impl.deploymentConfigService.FilterPipelinesByApplicationClusterIdAndNamespace(pipelines, applicationClusterId, app.Namespace) + if err != nil { + impl.logger.Errorw("error in getting cd pipeline by applicationClusterId", "err", err, "applicationClusterId", applicationClusterId) + return isValid, pipeline, cdWfr, pipelineOverride, err + } // getting latest pipelineOverride for app (by appId and envId) pipelineOverride, err = impl.pipelineOverrideRepository.FindLatestByAppIdAndEnvId(pipeline.AppId, pipeline.EnvironmentId, bean4.ArgoCd) if err != nil { @@ -476,11 +478,16 @@ func (impl *AppServiceImpl) CheckIfPipelineUpdateEventIsValid(argoAppName, gitHa impl.logger.Errorw("error in getting latest wfr by pipelineId", "err", err, "pipelineId", pipeline.Id) return isValid, pipeline, cdWfr, pipelineOverride, err } - if util2.IsTerminalRunnerStatus(cdWfr.Status) { + if globalUtil.IsTerminalRunnerStatus(cdWfr.Status) { // drop event return isValid, pipeline, cdWfr, pipelineOverride, nil } - if impl.acdConfig.IsManualSyncEnabled() { + deploymentConfig, err := impl.deploymentConfigService.GetConfigForDevtronApps(pipeline.AppId, pipeline.EnvironmentId) + if err != nil { + impl.logger.Errorw("error in getting deployment config by appId and environmentId", "appId", pipeline.AppId, "environmentId", pipeline.EnvironmentId, "err", err) + return isValid, pipeline, cdWfr, pipelineOverride, err + } + if impl.acdConfig.IsManualSyncEnabled() && deploymentConfig.IsArgoAppSyncAndRefreshSupported() { // if manual sync, proceed only if ARGOCD_SYNC_COMPLETED timeline is created isArgoAppSynced := impl.pipelineStatusTimelineService.GetArgoAppSyncStatus(cdWfr.Id) if !isArgoAppSynced { @@ -755,7 +762,7 @@ func (impl *AppServiceImpl) UpdatePipelineStatusTimelineForApplicationChanges(ap } func (impl *AppServiceImpl) WriteCDSuccessEvent(appId int, envId int, override *chartConfig.PipelineOverride) { - event, _ := impl.eventFactory.Build(util.Success, &override.PipelineId, appId, &envId, util.CD) + event, _ := impl.eventFactory.Build(eventUtil.Success, &override.PipelineId, appId, &envId, eventUtil.CD) impl.logger.Debugw("event WriteCDSuccessEvent", "event", event, "override", override) event = impl.eventFactory.BuildExtraCDData(event, nil, override.Id, bean.CD_WORKFLOW_TYPE_DEPLOY) _, evtErr := impl.eventClient.WriteNotificationEvent(event) @@ -783,63 +790,20 @@ type ValuesOverrideResponse struct { ManifestPushTemplate *bean3.ManifestPushTemplate } -func (impl *AppServiceImpl) GetDeployedManifestByPipelineIdAndCDWorkflowId(appId int, envId int, cdWorkflowId int, ctx context.Context) ([]byte, error) { - - manifestByteArray := make([]byte, 0) +func (impl *AppServiceImpl) CreateGitOpsRepo(app *app.App, targetRevision string, userId int32) (gitOpsRepoName string, chartGitAttr *commonBean.ChartGitAttribute, err error) { - pipeline, err := impl.pipelineRepository.FindActiveByAppIdAndEnvironmentId(appId, envId) + deploymentConfig, err := impl.deploymentConfigService.GetConfigForDevtronApps(app.Id, 0) if err != nil { - impl.logger.Errorw("error in fetching pipeline by appId and envId", "appId", appId, "envId", envId, "err", err) - return manifestByteArray, err - } - - pipelineOverride, err := impl.pipelineOverrideRepository.FindLatestByCdWorkflowId(cdWorkflowId) - if err != nil { - impl.logger.Errorw("error in fetching latest release by appId and envId", "appId", appId, "envId", envId, "err", err) - return manifestByteArray, 
err - } - - envConfigOverride, err := impl.envConfigOverrideReadService.GetByIdIncludingInactive(pipelineOverride.EnvConfigOverrideId) - if err != nil { - impl.logger.Errorw("error in fetching env config repository by appId and envId", "appId", appId, "envId", envId, "err", err) - } - - appName := pipeline[0].App.AppName - builtChartPath, err := impl.deploymentTemplateService.BuildChartAndGetPath(appName, envConfigOverride, ctx) - if err != nil { - impl.logger.Errorw("error in parsing reference chart", "err", err) - return manifestByteArray, err - } - - // create values file in built chart path - valuesFilePath := path.Join(builtChartPath, "valuesOverride.yaml") - err = ioutil.WriteFile(valuesFilePath, []byte(pipelineOverride.PipelineMergedValues), 0600) - if err != nil { - return manifestByteArray, nil - } - - manifestByteArray, err = impl.chartTemplateService.LoadChartInBytes(builtChartPath, true) - if err != nil { - impl.logger.Errorw("error in converting chart to bytes", "err", err) - return manifestByteArray, err - } - - return manifestByteArray, nil - -} - -func (impl *AppServiceImpl) CreateGitOpsRepo(app *app.App, userId int32) (gitopsRepoName string, chartGitAttr *commonBean.ChartGitAttribute, err error) { - chart, err := impl.chartRepository.FindLatestChartForAppByAppId(app.Id) - if err != nil && pg.ErrNoRows != err { + impl.logger.Errorw("error in getting deployment config for devtron apps", "appId", app.Id, "err", err) return "", nil, err } - gitOpsRepoName := impl.gitOpsConfigReadService.GetGitOpsRepoName(app.AppName) - chartGitAttr, err = impl.gitOperationService.CreateGitRepositoryForDevtronApp(context.Background(), gitOpsRepoName, userId) + gitOpsRepoName = impl.gitOpsConfigReadService.GetGitOpsRepoName(app.AppName) + chartGitAttr, err = impl.gitOperationService.CreateGitRepositoryForDevtronApp(context.Background(), gitOpsRepoName, targetRevision, userId) if err != nil { impl.logger.Errorw("error in pushing chart to git ", "gitOpsRepoName", gitOpsRepoName, "err", err) return "", nil, err } - chartGitAttr.ChartLocation = filepath.Join(chart.ReferenceTemplate, chart.ChartVersion) + chartGitAttr.ChartLocation = deploymentConfig.GetChartLocation() return gitOpsRepoName, chartGitAttr, nil } @@ -914,7 +878,7 @@ type PipelineMaterialInfo struct { func buildCDTriggerEvent(impl *AppServiceImpl, overrideRequest *bean.ValuesOverrideRequest, pipeline *pipelineConfig.Pipeline, envOverride *chartConfig.EnvConfigOverride, materialInfo map[string]string, artifact *repository.CiArtifact) client.Event { - event, _ := impl.eventFactory.Build(util.Trigger, &pipeline.Id, pipeline.AppId, &pipeline.EnvironmentId, util.CD) + event, _ := impl.eventFactory.Build(eventUtil.Trigger, &pipeline.Id, pipeline.AppId, &pipeline.EnvironmentId, eventUtil.CD) return event } @@ -961,7 +925,7 @@ func NewReleaseAttributes(image, imageTag, pipelineName, deploymentStrategy stri } func (releaseAttributes *ReleaseAttributes) RenderJson(jsonTemplate string) (string, error) { - override, err := util2.Tprintf(jsonTemplate, releaseAttributes) + override, err := globalUtil.Tprintf(jsonTemplate, releaseAttributes) return override, err } @@ -1021,7 +985,7 @@ func (impl *AppServiceImpl) UpdateCdWorkflowRunnerByACDObject(app *v1alpha1.Appl impl.logger.Errorw("error in fetching environment deployment config by appId and envId", "appId", appId, "envId", envId, "err", err) return err } - util2.TriggerCDMetrics(cdWorkflow.GetTriggerMetricsFromRunnerObj(wfr, envDeploymentConfig), impl.appStatusConfig.ExposeCDMetrics) + 
globalUtil.TriggerCDMetrics(cdWorkflow.GetTriggerMetricsFromRunnerObj(wfr, envDeploymentConfig), impl.appStatusConfig.ExposeCDMetrics)
 	return nil
 }
diff --git a/pkg/app/appDetails/adapter/adapter.go b/pkg/app/appDetails/adapter/adapter.go
new file mode 100644
index 0000000000..9dfc0c991a
--- /dev/null
+++ b/pkg/app/appDetails/adapter/adapter.go
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2020-2024. Devtron Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package adapter
+
+import (
+	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+	"github.com/argoproj/gitops-engine/pkg/health"
+	"github.com/devtron-labs/common-lib/utils/k8sObjectsUtil"
+	"github.com/devtron-labs/devtron/api/helm-app/gRPC"
+	argoApplication "github.com/devtron-labs/devtron/client/argocdServer/bean"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"time"
+)
+
+func GetArgoPodMetadata(podMetadatas []*gRPC.PodMetadata) []*argoApplication.PodMetadata {
+	if len(podMetadatas) == 0 {
+		return []*argoApplication.PodMetadata{}
+	}
+	resp := make([]*argoApplication.PodMetadata, 0, len(podMetadatas))
+	for _, podMetadata := range podMetadatas {
+		argoPodMetadata := &argoApplication.PodMetadata{
+			Name:                podMetadata.Name,
+			UID:                 podMetadata.Uid,
+			Containers:          GetArgoContainers(podMetadata.Containers),
+			InitContainers:      GetArgoContainers(podMetadata.InitContainers),
+			IsNew:               podMetadata.IsNew,
+			EphemeralContainers: GetArgoEphemeralContainers(podMetadata.EphemeralContainers),
+		}
+		resp = append(resp, argoPodMetadata)
+	}
+	return resp
+}
+
+func GetArgoEphemeralContainers(ephemeralContainers []*gRPC.EphemeralContainerData) []*k8sObjectsUtil.EphemeralContainerData {
+	if len(ephemeralContainers) == 0 {
+		return []*k8sObjectsUtil.EphemeralContainerData{}
+	}
+	resp := make([]*k8sObjectsUtil.EphemeralContainerData, 0, len(ephemeralContainers))
+	for _, ephemeralContainer := range ephemeralContainers {
+		argoEphemeralContainerData := &k8sObjectsUtil.EphemeralContainerData{
+			Name:       ephemeralContainer.GetName(),
+			IsExternal: ephemeralContainer.GetIsExternal(),
+		}
+		resp = append(resp, argoEphemeralContainerData)
+	}
+	return resp
+}
+
+func GetArgoContainers(containers []string) []*string {
+	if len(containers) == 0 {
+		return []*string{}
+	}
+	resp := make([]*string, 0, len(containers))
+	for _, container := range containers {
+		container := container // copy the loop variable so each appended pointer is distinct under pre-1.22 Go loop semantics
+		resp = append(resp, &container)
+	}
+	return resp
+}
+
+func GetArgoApplicationTreeForNodes(nodes []*gRPC.ResourceNode) (*v1alpha1.ApplicationTree, error) {
+	if len(nodes) == 0 {
+		return &v1alpha1.ApplicationTree{}, nil
+	}
+	argoNodes := make([]v1alpha1.ResourceNode, 0, len(nodes))
+	for _, node := range nodes {
+		argoResourceNode := v1alpha1.ResourceNode{
+			ResourceRef: v1alpha1.ResourceRef{
+				Group:     node.Group,
+				Version:   node.Version,
+				Kind:      node.Kind,
+				Namespace: node.Namespace,
+				Name:      node.Name,
+				UID:       node.Uid,
+			},
+			ParentRefs:      GetArgoParentRefs(node.GetParentRefs()),
+			Info:            GetArgoInfoItems(node.GetInfo()),
+			NetworkingInfo:  GetArgoNetworkingInfo(node.GetNetworkingInfo()),
+			ResourceVersion: node.ResourceVersion,
+			Images:          nil, // TODO: confirm whether Images is consumed anywhere; intentionally kept nil for now
+			Health:          GetArgoHealthStatus(node.Health),
+		}
+		createdAtTime, err := time.Parse(time.RFC3339, node.CreatedAt)
+		if err != nil {
+			return nil, err
+		}
+		argoResourceNode.CreatedAt = &metav1.Time{Time: createdAtTime}
+		argoNodes = append(argoNodes, argoResourceNode)
+	}
+	return &v1alpha1.ApplicationTree{
+		Nodes: argoNodes,
+	}, nil
+}
+
+func GetArgoHealthStatus(status *gRPC.HealthStatus) *v1alpha1.HealthStatus {
+	if status == nil {
+		return nil
+	}
+	return &v1alpha1.HealthStatus{
+		Status:  health.HealthStatusCode(status.GetStatus()),
+		Message: status.GetMessage(),
+	}
+}
+
+func GetArgoNetworkingInfo(info *gRPC.ResourceNetworkingInfo) *v1alpha1.ResourceNetworkingInfo {
+	if info == nil {
+		return &v1alpha1.ResourceNetworkingInfo{}
+	}
+	return &v1alpha1.ResourceNetworkingInfo{
+		Labels: info.GetLabels(),
+	}
+}
+
+func GetArgoInfoItems(infoItems []*gRPC.InfoItem) []v1alpha1.InfoItem {
+	if len(infoItems) == 0 {
+		return []v1alpha1.InfoItem{}
+	}
+	resp := make([]v1alpha1.InfoItem, 0, len(infoItems))
+	for _, infoItem := range infoItems {
+		argoInfoItem := v1alpha1.InfoItem{
+			Name:  infoItem.Name,
+			Value: infoItem.Value,
+		}
+		resp = append(resp, argoInfoItem)
+	}
+	return resp
+}
+
+func GetArgoParentRefs(parentRefs []*gRPC.ResourceRef) []v1alpha1.ResourceRef {
+	if len(parentRefs) == 0 {
+		return []v1alpha1.ResourceRef{}
+	}
+	resp := make([]v1alpha1.ResourceRef, 0, len(parentRefs))
+	for _, parentRef := range parentRefs {
+		ref := v1alpha1.ResourceRef{
+			Group:     parentRef.Group,
+			Version:   parentRef.Version,
+			Kind:      parentRef.Kind,
+			Namespace: parentRef.Namespace,
+			Name:      parentRef.Name,
+			UID:       parentRef.Uid,
+		}
+		resp = append(resp, ref)
+	}
+	return resp
+}
diff --git a/pkg/app/appDetails/read/AppDetailsReadService.go b/pkg/app/appDetails/read/AppDetailsReadService.go
new file mode 100644
index 0000000000..d3b4014564
--- /dev/null
+++ b/pkg/app/appDetails/read/AppDetailsReadService.go
@@ -0,0 +1,154 @@
+package read
+
+import (
+	"context"
+	"github.com/devtron-labs/devtron/api/bean/AppView"
+	"github.com/devtron-labs/devtron/api/bean/gitOps"
+	internalRepo "github.com/devtron-labs/devtron/internal/sql/repository"
+	"github.com/devtron-labs/devtron/internal/sql/repository/appWorkflow"
+	"github.com/devtron-labs/devtron/pkg/deployment/common/read"
+	"github.com/devtron-labs/devtron/pkg/deployment/gitOps/config"
+	"github.com/go-pg/pg"
+	"go.opentelemetry.io/otel"
+	"go.uber.org/zap"
+)
+
+type AppDetailsReadService interface {
+	FetchAppStageStatus(appId int, appType int) ([]AppView.AppStageStatus, error)
+	FetchAppDetail(ctx context.Context, appId int, envId int) (AppView.AppDetailContainer, error)
+}
+
+type AppDetailsReadServiceImpl struct {
+	dbConnection                *pg.DB
+	Logger                      *zap.SugaredLogger
+	gitOpsConfigReadService     config.GitOpsConfigReadService
+	deploymentConfigReadService read.DeploymentConfigReadService
+	appWorkflowRepository       appWorkflow.AppWorkflowRepository
+	appListingRepository        internalRepo.AppListingRepository
+}
+
+func NewAppDetailsReadServiceImpl(
+	dbConnection *pg.DB,
+	Logger *zap.SugaredLogger,
+	gitOpsConfigReadService config.GitOpsConfigReadService,
+	deploymentConfigReadService read.DeploymentConfigReadService,
+	appWorkflowRepository appWorkflow.AppWorkflowRepository,
+	appListingRepository internalRepo.AppListingRepository,
+) *AppDetailsReadServiceImpl {
+	return &AppDetailsReadServiceImpl{
+		dbConnection:                dbConnection,
+		Logger:                      Logger,
+		gitOpsConfigReadService:     gitOpsConfigReadService,
+		deploymentConfigReadService: deploymentConfigReadService,
+		appWorkflowRepository:       appWorkflowRepository,
+		appListingRepository:        appListingRepository,
+	}
+}
+
+func (impl *AppDetailsReadServiceImpl) FetchAppStageStatus(appId int, appType int) ([]AppView.AppStageStatus, error) {
+	impl.Logger.Debugw("FetchAppStageStatus request received", "appId", appId, "appType", appType)
+	var appStageStatus []AppView.AppStageStatus
+	stages, err := impl.appListingRepository.FetchAppStage(appId, appType)
+	if err != nil {
+		impl.Logger.Errorw("error while fetching app stage", "appId", appId, "err", err)
+		return appStageStatus, err
+	}
+	gitOpsConfigStatus, err := impl.gitOpsConfigReadService.IsGitOpsConfigured()
+	if err != nil {
+		impl.Logger.Errorw("error while checking IsGitOpsConfigured", "err", err)
+		return nil, err
+	}
+	// the GITOPS_CONFIG stage is marked required only when ArgoCD is installed and custom repositories are allowed
+	isCustomGitopsRepoUrl := gitOpsConfigStatus.IsArgoCdInstalled && gitOpsConfigStatus.AllowCustomRepository
+
+	deploymentConfigMin, err := impl.deploymentConfigReadService.GetDeploymentConfigMinForAppAndEnv(appId, 0)
+	if err != nil {
+		impl.Logger.Errorw("error while getting deploymentConfig", "appId", appId, "err", err)
+		return appStageStatus, err
+	}
+
+	if deploymentConfigMin != nil {
+		stages.DeploymentConfigRepoURL = deploymentConfigMin.GitRepoUrl
+	}
+
+	if (gitOps.IsGitOpsRepoNotConfigured(stages.ChartGitRepoUrl) &&
+		gitOps.IsGitOpsRepoNotConfigured(stages.DeploymentConfigRepoURL)) &&
+		stages.CiPipelineId == 0 {
+
+		stages.ChartGitRepoUrl = ""
+		stages.DeploymentConfigRepoURL = ""
+	}
+	appStageStatus = append(appStageStatus, impl.makeAppStageStatus(0, "APP", stages.AppId, true),
+		impl.makeAppStageStatus(1, "MATERIAL", stages.GitMaterialExists, true),
+		impl.makeAppStageStatus(2, "TEMPLATE", stages.CiTemplateId, true),
+		impl.makeAppStageStatus(3, "CI_PIPELINE", stages.CiPipelineId, true),
+		impl.makeAppStageStatus(4, "CHART", stages.ChartId, true),
+		impl.makeAppStageStatus(5, "GITOPS_CONFIG", len(stages.ChartGitRepoUrl)+len(stages.DeploymentConfigRepoURL), isCustomGitopsRepoUrl),
+		impl.makeAppStageStatus(6, "CD_PIPELINE", stages.PipelineId, true),
+		impl.makeAppStageChartEnvConfigStatus(7, "CHART_ENV_CONFIG", stages.YamlStatus == 3 && stages.YamlReviewed),
+	)
+	return appStageStatus, nil
+}
+
+func (impl *AppDetailsReadServiceImpl) FetchAppDetail(ctx context.Context, appId int, envId int) (AppView.AppDetailContainer, error) {
+	impl.Logger.Debugw("FetchAppDetail request received", "appId", appId, "envId", envId)
+	var appDetailContainer AppView.AppDetailContainer
+	newCtx, span := otel.Tracer("orchestrator").Start(ctx, "AppDetailsReadServiceImpl.FetchAppDetail")
+	defer span.End()
+	// Fetch deployment detail of cd pipeline latest triggered within env of any App.
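+	// A failed lookup below is treated as non-fatal: the error is only logged as a
+	// warning and the (possibly empty) detail container is still returned, so the
+	// app-details page can render whatever data could be resolved.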
+	deploymentDetail, err := impl.deploymentDetailsByAppIdAndEnvId(newCtx, appId, envId)
+	if err != nil {
+		impl.Logger.Warnw("unable to fetch deployment detail for app", "appId", appId, "envId", envId, "err", err)
+	}
+	if deploymentDetail.PcoId > 0 {
+		deploymentDetail.IsPipelineTriggered = true
+	}
+	appWfMapping, _ := impl.appWorkflowRepository.FindWFCDMappingByCDPipelineId(deploymentDetail.CdPipelineId)
+	if appWfMapping.ParentType == appWorkflow.CDPIPELINE {
+		parentEnvironmentName, _ := impl.appListingRepository.GetEnvironmentNameFromPipelineId(appWfMapping.ParentId)
+		deploymentDetail.ParentEnvironmentName = parentEnvironmentName
+	}
+	appDetailContainer.DeploymentDetailContainer = deploymentDetail
+	return appDetailContainer, nil
+}
+
+// deploymentDetailsByAppIdAndEnvId returns the deployment detail of the cd pipeline most recently triggered for the given app and environment
+func (impl *AppDetailsReadServiceImpl) deploymentDetailsByAppIdAndEnvId(ctx context.Context, appId int, envId int) (AppView.DeploymentDetailContainer, error) {
+	_, span := otel.Tracer("orchestrator").Start(ctx, "AppDetailsReadServiceImpl.deploymentDetailsByAppIdAndEnvId")
+	defer span.End()
+	deploymentDetail, err := impl.appListingRepository.GetDeploymentDetailsByAppIdAndEnvId(appId, envId)
+	if err != nil {
+		impl.Logger.Errorw("error in getting deployment details by appId and envId", "appId", appId, "envId", envId, "err", err)
+		return deploymentDetail, err
+	}
+	deploymentDetail.EnvironmentId = envId
+	deploymentConfigMin, err := impl.deploymentConfigReadService.GetDeploymentConfigMinForAppAndEnv(appId, envId)
+	if err != nil {
+		impl.Logger.Errorw("error in getting deployment config by appId and envId", "appId", appId, "envId", envId, "err", err)
+		return deploymentDetail, err
+	}
+	deploymentDetail.DeploymentAppType = deploymentConfigMin.DeploymentAppType
+	deploymentDetail.ReleaseMode = deploymentConfigMin.ReleaseMode
+	return deploymentDetail, nil
+}
+
+func (impl *AppDetailsReadServiceImpl) makeAppStageChartEnvConfigStatus(stage int, stageName string, status bool) AppView.AppStageStatus {
+	return AppView.AppStageStatus{Stage: stage, StageName: stageName, Status: status, Required: true}
+}
+
+func (impl *AppDetailsReadServiceImpl) makeAppStageStatus(stage int, stageName string, id int, isRequired bool) AppView.AppStageStatus {
+	return AppView.AppStageStatus{
+		Stage:     stage,
+		StageName: stageName,
+		Status:    id > 0, // a stage is complete when its backing entity exists
+		Required:  isRequired,
+	}
+}
diff --git a/pkg/app/bean/ManifestPushTemplate.go b/pkg/app/bean/ManifestPushTemplate.go
index af6353a717..3439adafb1 100644
--- a/pkg/app/bean/ManifestPushTemplate.go
+++ b/pkg/app/bean/ManifestPushTemplate.go
@@ -30,16 +30,20 @@ type ManifestPushTemplate struct {
 	UserId                 int32
 	PipelineOverrideId     int
 	AppName                string
-	TargetEnvironmentName  int
+	TargetEnvironmentId    int
 	ChartReferenceTemplate string
 	ChartName              string
 	ChartVersion           string
 	ChartLocation          string
 	RepoUrl                string
+	TargetRevision         string
+	ValuesFilePath         string
+	ReleaseMode            string
 	IsCustomGitRepository  bool
 	BuiltChartPath         string
 	BuiltChartBytes        *[]byte
 	MergedValues           string
+	IsArgoSyncSupported    bool
 }
 
 type ManifestPushResponse struct {
diff --git a/pkg/appClone/AppCloneService.go b/pkg/appClone/AppCloneService.go
index 44a1d7a409..4c7fd14bfc 100644
--- a/pkg/appClone/AppCloneService.go
+++ b/pkg/appClone/AppCloneService.go
@@ -34,11 +34,14 @@ import (
 	"github.com/devtron-labs/devtron/pkg/bean"
 	pipeline2
"github.com/devtron-labs/devtron/pkg/build/pipeline" "github.com/devtron-labs/devtron/pkg/chart" + bean5 "github.com/devtron-labs/devtron/pkg/chart/bean" + read3 "github.com/devtron-labs/devtron/pkg/chart/read" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" "github.com/devtron-labs/devtron/pkg/pipeline" bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/go-pg/pg" "go.uber.org/zap" + "net/http" "strings" ) @@ -61,6 +64,7 @@ type AppCloneServiceImpl struct { pipelineRepository pipelineConfig.PipelineRepository ciPipelineConfigService pipeline.CiPipelineConfigService gitOpsConfigReadService config.GitOpsConfigReadService + chartReadService read3.ChartReadService } func NewAppCloneServiceImpl(logger *zap.SugaredLogger, @@ -75,7 +79,8 @@ func NewAppCloneServiceImpl(logger *zap.SugaredLogger, appRepository app2.AppRepository, ciPipelineRepository pipelineConfig.CiPipelineRepository, pipelineRepository pipelineConfig.PipelineRepository, ciPipelineConfigService pipeline.CiPipelineConfigService, - gitOpsConfigReadService config.GitOpsConfigReadService) *AppCloneServiceImpl { + gitOpsConfigReadService config.GitOpsConfigReadService, + chartReadService read3.ChartReadService) *AppCloneServiceImpl { return &AppCloneServiceImpl{ logger: logger, pipelineBuilder: pipelineBuilder, @@ -92,6 +97,7 @@ func NewAppCloneServiceImpl(logger *zap.SugaredLogger, pipelineRepository: pipelineRepository, ciPipelineConfigService: ciPipelineConfigService, gitOpsConfigReadService: gitOpsConfigReadService, + chartReadService: chartReadService, } } @@ -331,13 +337,13 @@ func (impl *AppCloneServiceImpl) CreateCiTemplate(oldAppId, newAppId int, userId return res, err } -func (impl *AppCloneServiceImpl) CreateDeploymentTemplate(oldAppId, newAppId int, userId int32, context context.Context) (*chart.TemplateRequest, error) { - refTemplate, err := impl.chartService.FindLatestChartForAppByAppId(oldAppId) +func (impl *AppCloneServiceImpl) CreateDeploymentTemplate(oldAppId, newAppId int, userId int32, context context.Context) (*bean5.TemplateRequest, error) { + refTemplate, err := impl.chartReadService.FindLatestChartForAppByAppId(oldAppId) if err != nil { impl.logger.Errorw("error in fetching ref app chart ", "app", oldAppId, "err", err) return nil, err } - templateReq := chart.TemplateRequest{ + templateReq := bean5.TemplateRequest{ Id: 0, AppId: newAppId, Latest: refTemplate.Latest, @@ -519,7 +525,7 @@ func (impl *AppCloneServiceImpl) createEnvOverride(oldAppId, newAppId int, userI createResp, err := impl.propertiesConfigService.CreateEnvironmentProperties(newAppId, envPropertiesReq) if err != nil { if err.Error() == bean2.NOCHARTEXIST { - templateRequest := chart.TemplateRequest{ + templateRequest := bean5.TemplateRequest{ AppId: newAppId, ChartRefId: envPropertiesReq.ChartRefId, ValuesOverride: []byte("{}"), @@ -720,7 +726,7 @@ func (impl *AppCloneServiceImpl) createWfInstances(refWfMappings []bean4.AppWork sourceToNewPipelineId: sourceToNewPipelineIdMapping, externalCiPipelineId: createWorkflowMappingDto.externalCiPipelineId, } - pipeline, err := impl.CreateCdPipeline(cdCloneReq, ctx) + pipeline, err := impl.createClonedCdPipeline(cdCloneReq, ctx) impl.logger.Debugw("cd pipeline created", "pipeline", pipeline) if err != nil { impl.logger.Errorw("error in getting cd-pipeline", "refAppId", createWorkflowMappingDto.oldAppId, "newAppId", 
createWorkflowMappingDto.newAppId, "err", err) @@ -774,7 +780,7 @@ func (impl *AppCloneServiceImpl) createWfInstances(refWfMappings []bean4.AppWork refAppName: refApp.AppName, sourceToNewPipelineId: sourceToNewPipelineIdMapping, } - pipeline, err := impl.CreateCdPipeline(cdCloneReq, ctx) + pipeline, err := impl.createClonedCdPipeline(cdCloneReq, ctx) if err != nil { impl.logger.Errorw("error in creating cd pipeline, app clone", "err", err) return createWorkflowMappingDto, err @@ -966,7 +972,7 @@ type cloneCdPipelineRequest struct { externalCiPipelineId int } -func (impl *AppCloneServiceImpl) CreateCdPipeline(req *cloneCdPipelineRequest, ctx context.Context) (*bean.CdPipelines, error) { +func (impl *AppCloneServiceImpl) createClonedCdPipeline(req *cloneCdPipelineRequest, ctx context.Context) (*bean.CdPipelines, error) { refPipelines, err := impl.pipelineBuilder.GetCdPipelinesForApp(req.refAppId) if err != nil { return nil, err } @@ -991,11 +997,11 @@ func (impl *AppCloneServiceImpl) CreateCdPipeline(req *cloneCdPipelineRequest, c util.PIPELINE_DEPLOYMENT_TYPE_ACD: true, util.PIPELINE_DEPLOYMENT_TYPE_HELM: true, } - DeploymentAppConfigForEnvironment, err := impl.attributesService.GetDeploymentEnforcementConfig(refCdPipeline.EnvironmentId) + deploymentAppConfigForEnvironment, err := impl.attributesService.GetDeploymentEnforcementConfig(refCdPipeline.EnvironmentId) if err != nil { impl.logger.Errorw("error in fetching deployment config for environment", "err", err) } - for deploymentType, allowed := range DeploymentAppConfigForEnvironment { + for deploymentType, allowed := range deploymentAppConfigForEnvironment { AllowedDeploymentAppTypes[deploymentType] = allowed } gitOpsConfigurationStatus, err := impl.gitOpsConfigReadService.IsGitOpsConfigured() @@ -1003,10 +1009,24 @@ func (impl *AppCloneServiceImpl) CreateCdPipeline(req *cloneCdPipelineRequest, c impl.logger.Errorw("error in checking if gitOps configured", "err", err) return nil, err } + + // TODO Asutosh: skip pipeline and its children + if refCdPipeline.IsExternalArgoAppLinkRequest() { + impl.logger.Warnw("argo cd is not installed, skipping creation of linked cd pipeline", "cdPipelineId", refCdPipeline.Id) + apiErr := &util.ApiError{ + HttpStatusCode: http.StatusPreconditionFailed, + UserMessage: "GitOps integration is not installed/configured. Please install/configure GitOps.", + InternalMessage: "GitOps integration is not installed/configured.
Please install/configure GitOps.", + } + return nil, apiErr + } + var deploymentAppType string if AllowedDeploymentAppTypes[util.PIPELINE_DEPLOYMENT_TYPE_ACD] && AllowedDeploymentAppTypes[util.PIPELINE_DEPLOYMENT_TYPE_HELM] { deploymentAppType = refCdPipeline.DeploymentAppType - } else if AllowedDeploymentAppTypes[util.PIPELINE_DEPLOYMENT_TYPE_ACD] && gitOpsConfigurationStatus.IsGitOpsConfigured { + } else if AllowedDeploymentAppTypes[util.PIPELINE_DEPLOYMENT_TYPE_ACD] && gitOpsConfigurationStatus.IsGitOpsConfiguredAndArgoCdInstalled() { + // if GitOps is configured and ArgoCD is installed, then the deployment type should be ACD + // if GitOps is configured and ArgoCD is not installed, then the deployment type should be Helm deploymentAppType = util.PIPELINE_DEPLOYMENT_TYPE_ACD } else if AllowedDeploymentAppTypes[util.PIPELINE_DEPLOYMENT_TYPE_HELM] { deploymentAppType = util.PIPELINE_DEPLOYMENT_TYPE_HELM diff --git a/pkg/appClone/batch/DeploymentTemplate.go b/pkg/appClone/batch/DeploymentTemplate.go index f0a096bd20..1780dd440c 100644 --- a/pkg/appClone/batch/DeploymentTemplate.go +++ b/pkg/appClone/batch/DeploymentTemplate.go @@ -23,6 +23,7 @@ import ( pc "github.com/devtron-labs/devtron/internal/sql/repository/app" v1 "github.com/devtron-labs/devtron/pkg/apis/devtron/v1" "github.com/devtron-labs/devtron/pkg/chart" + "github.com/devtron-labs/devtron/pkg/chart/bean" "github.com/devtron-labs/devtron/util" "go.uber.org/zap" ) @@ -81,7 +82,7 @@ func executeDeploymentTemplateCreate(impl DeploymentTemplateActionImpl, template impl.logger.Errorw("marshal err", "err", err) return fmt.Errorf("invalid values for deployment template") } - dTemplate := chart.TemplateRequest{ + dTemplate := bean.TemplateRequest{ Id: 0, AppId: app.Id, ValuesOverride: valueOverride, diff --git a/pkg/appStore/adapter/Adapter.go b/pkg/appStore/adapter/Adapter.go index 8c92890eaf..9808d43a72 100644 --- a/pkg/appStore/adapter/Adapter.go +++ b/pkg/appStore/adapter/Adapter.go @@ -274,7 +274,8 @@ func UpdateInstallAppDetails(request *appStoreBean.InstallAppVersionDTO, install request.Status = installedApp.Status request.DeploymentAppType = config.DeploymentAppType if util.IsAcdApp(config.DeploymentAppType) { - request.GitOpsRepoURL = config.RepoURL + request.GitOpsRepoURL = config.GetRepoURL() + request.TargetRevision = config.GetTargetRevision() } } diff --git a/pkg/appStore/bean/bean.go b/pkg/appStore/bean/bean.go index e74f3dc032..6644ef5bc1 100644 --- a/pkg/appStore/bean/bean.go +++ b/pkg/appStore/bean/bean.go @@ -19,14 +19,17 @@ package appStoreBean import ( "encoding/json" "github.com/argoproj/gitops-engine/pkg/health" + "github.com/devtron-labs/common-lib/utils/k8s/commonBean" apiBean "github.com/devtron-labs/devtron/api/bean/gitOps" openapi "github.com/devtron-labs/devtron/api/helm-app/openapiClient" bean3 "github.com/devtron-labs/devtron/api/helm-app/service/bean" + "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow/cdWorkflow" + util2 "github.com/devtron-labs/devtron/internal/util" + clusterBean "github.com/devtron-labs/devtron/pkg/cluster/bean" "github.com/devtron-labs/devtron/pkg/cluster/environment/bean" bean2 
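The two comments added to the ACD branch above compress the whole fallback matrix for choosing deploymentAppType. As a sketch of the same decision extracted into a standalone helper (hypothetical, not part of this PR), assuming only the constants and accessor already visible in this hunk:

// chooseDeploymentAppType mirrors the documented fallback: prefer the ref
// pipeline's type when both types are allowed; otherwise ACD only when GitOps
// is configured AND Argo CD is installed; otherwise fall back to Helm.
func chooseDeploymentAppType(allowedACD, allowedHelm, gitOpsReadyWithArgoCd bool, refType string) string {
	switch {
	case allowedACD && allowedHelm:
		return refType
	case allowedACD && gitOpsReadyWithArgoCd:
		return util.PIPELINE_DEPLOYMENT_TYPE_ACD
	case allowedHelm:
		return util.PIPELINE_DEPLOYMENT_TYPE_HELM
	}
	return ""
}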
"github.com/devtron-labs/devtron/pkg/deployment/common/bean" "github.com/devtron-labs/devtron/util" - "github.com/devtron-labs/devtron/util/gitUtil" "slices" "time" ) @@ -108,6 +111,7 @@ type InstallAppVersionDTO struct { IsVirtualEnvironment bool `json:"isVirtualEnvironment"` HelmPackageName string `json:"helmPackageName"` GitOpsRepoURL string `json:"gitRepoURL"` + TargetRevision string `json:"-"` IsCustomRepository bool `json:"-"` IsNewGitOpsRepo bool `json:"-"` ACDAppName string `json:"-"` @@ -125,6 +129,13 @@ type InstallAppVersionDTO struct { IsChartLinkRequest bool } +func (chart *InstallAppVersionDTO) GetTargetRevision() string { + if len(chart.TargetRevision) == 0 { + return util.GetDefaultTargetRevision() + } + return chart.TargetRevision +} + func (chart *InstallAppVersionDTO) GetAppIdentifierString() string { displayName := chart.DisplayName if len(displayName) == 0 { @@ -218,9 +229,32 @@ func (chart *InstallAppVersionDTO) GetDeploymentConfig() *bean2.DeploymentConfig EnvironmentId: chart.EnvironmentId, ConfigType: configType, DeploymentAppType: chart.DeploymentAppType, - RepoURL: chart.GitOpsRepoURL, - RepoName: gitUtil.GetGitRepoNameFromGitRepoUrl(chart.GitOpsRepoURL), - Active: true, + ReleaseMode: util2.PIPELINE_RELEASE_MODE_CREATE, + ReleaseConfiguration: &bean2.ReleaseConfiguration{ + Version: bean2.Version, + ArgoCDSpec: bean2.ArgoCDSpec{ + Metadata: bean2.ApplicationMetadata{ + ClusterId: clusterBean.DefaultClusterId, + Namespace: argocdServer.DevtronInstalationNs, + }, + Spec: bean2.ApplicationSpec{ + Destination: &bean2.Destination{ + Namespace: chart.Namespace, + Server: commonBean.DefaultClusterUrl, + }, + Source: &bean2.ApplicationSource{ + RepoURL: chart.GitOpsRepoURL, + Path: util.BuildDeployedAppName(chart.AppName, chart.EnvironmentName), + Helm: &bean2.ApplicationSourceHelm{ + ValueFiles: []string{"values.yaml"}, + }, + TargetRevision: util.GetDefaultTargetRevision(), + }, + SyncPolicy: nil, + }, + }, + }, + Active: true, } } @@ -453,7 +487,6 @@ type ChartComponent struct { } const ( - DEFAULT_CLUSTER_ID = 1 DEFAULT_NAMESPACE = "default" DEFAULT_ENVIRONMENT_OR_NAMESPACE_OR_PROJECT = "devtron" CLUSTER_COMPONENT_DIR_PATH = "/cluster/component" diff --git a/pkg/appStore/chartGroup/ChartGroupService.go b/pkg/appStore/chartGroup/ChartGroupService.go index c9a92eedcc..2f965cb8dd 100644 --- a/pkg/appStore/chartGroup/ChartGroupService.go +++ b/pkg/appStore/chartGroup/ChartGroupService.go @@ -1038,6 +1038,7 @@ func (impl *ChartGroupServiceImpl) performDeployStageOnAcd(installedAppVersion * } installedAppVersion.GitHash = appStoreGitOpsResponse.GitHash chartGitAttr.RepoUrl = appStoreGitOpsResponse.ChartGitAttribute.RepoUrl + chartGitAttr.TargetRevision = appStoreGitOpsResponse.ChartGitAttribute.TargetRevision chartGitAttr.ChartLocation = appStoreGitOpsResponse.ChartGitAttribute.ChartLocation } else { impl.logger.Infow("DB and GIT operation already done for this app and env, proceed for further step", "installedAppId", installedAppVersion.InstalledAppId, "existing status", installedAppVersion.Status) @@ -1052,6 +1053,7 @@ func (impl *ChartGroupServiceImpl) performDeployStageOnAcd(installedAppVersion * adapter.UpdateAdditionalEnvDetails(installedAppVersion, installedAppVersion.Environment) chartGitAttr.RepoUrl = installedAppVersion.GitOpsRepoURL + chartGitAttr.TargetRevision = installedAppVersion.GitOpsRepoURL chartGitAttr.ChartLocation = fmt.Sprintf("%s-%s", installedAppVersion.AppName, installedAppVersion.EnvironmentName) 
} diff --git a/pkg/appStore/installedApp/adapter/Adapter.go b/pkg/appStore/installedApp/adapter/Adapter.go index 58ad1543eb..d48360e61a 100644 --- a/pkg/appStore/installedApp/adapter/Adapter.go +++ b/pkg/appStore/installedApp/adapter/Adapter.go @@ -35,6 +35,7 @@ func ParseChartGitPushRequest(installAppRequestDTO *appStoreBean.InstallAppVersi EnvName: installAppRequestDTO.EnvironmentName, ChartAppStoreName: installAppRequestDTO.AppStoreName, RepoURL: installAppRequestDTO.GitOpsRepoURL, + TargetRevision: installAppRequestDTO.GetTargetRevision(), TempChartRefDir: tempRefChart, UserId: installAppRequestDTO.UserId, } @@ -45,7 +46,7 @@ func ParseChartCreateRequest(appName string, includePackageChart bool) *util.Cha return &util.ChartCreateRequest{ ChartMetaData: &chart.Metadata{ Name: appName, - Version: "1.0.1", + Version: "1.0.1", // TODO Asutosh: Why not the actual version? }, ChartPath: chartPath, IncludePackageChart: includePackageChart, diff --git a/pkg/appStore/installedApp/read/InstalledAppReadEAService.go b/pkg/appStore/installedApp/read/InstalledAppReadEAService.go index 88d8a2568e..dedaca3e9e 100644 --- a/pkg/appStore/installedApp/read/InstalledAppReadEAService.go +++ b/pkg/appStore/installedApp/read/InstalledAppReadEAService.go @@ -33,6 +33,7 @@ type InstalledAppReadServiceEA interface { // Additional details like app store details are also fetched. // Refer bean.InstalledAppVersionWithAppStoreDetails for more details. GetInstalledAppVersionIncludingDeleted(installedAppVersionId int) (*bean.InstalledAppVersionWithAppStoreDetails, error) + GetAllArgoAppNamesByDeploymentAppNames(deploymentAppNames []string) ([]string, error) // IsChartStoreAppManagedByArgoCd returns if a chart store app is deployed via argo-cd or not IsChartStoreAppManagedByArgoCd(appId int) (bool, error) } @@ -101,6 +102,10 @@ func (impl *InstalledAppReadServiceEAImpl) GetInstalledAppVersionIncludingDelete return adapter.GetInstalledAppVersionWithAppStoreDetails(installedAppVersionModel), nil } +func (impl *InstalledAppReadServiceEAImpl) GetAllArgoAppNamesByDeploymentAppNames(deploymentAppNames []string) ([]string, error) { + return impl.installedAppRepository.GetAllArgoAppsByDeploymentAppNames(deploymentAppNames) +} + func (impl *InstalledAppReadServiceEAImpl) IsChartStoreAppManagedByArgoCd(appId int) (bool, error) { installedAppModel, err := impl.installedAppRepository.GetInstalledAppsMinByAppId(appId) if err != nil { diff --git a/pkg/appStore/installedApp/repository/InstalledAppRepository.go b/pkg/appStore/installedApp/repository/InstalledAppRepository.go index 236204aebb..cdcd7de6e4 100644 --- a/pkg/appStore/installedApp/repository/InstalledAppRepository.go +++ b/pkg/appStore/installedApp/repository/InstalledAppRepository.go @@ -166,6 +166,7 @@ type InstalledAppRepository interface { // GetInstalledAppsMinByAppId will return the installed app by app id. // Extra Environment, App, Team, Cluster details are not fetched GetInstalledAppsMinByAppId(appId int) (*InstalledApps, error) + GetAllArgoAppsByDeploymentAppNames(deploymentAppNames []string) ([]string, error) } type InstalledAppRepositoryImpl struct { @@ -999,3 +1000,35 @@ func (impl *InstalledAppRepositoryImpl) FindInstalledAppsByAppId(appId int) ([]* } return installedApps, err } + +func (impl *InstalledAppRepositoryImpl) GetAllArgoAppsByDeploymentAppNames(deploymentAppNames []string) ([]string, error) { + result := make([]string, 0) + if len(deploymentAppNames) == 0 { + return result, nil + } + err := impl.dbConnection.Model(). + Table("installed_apps").
+ ColumnExpr("CONCAT(app.app_name, '-', environment.environment_name) AS deployment_app_name"). + // inner join with app + Join("INNER JOIN app"). + JoinOn("installed_apps.app_id = app.id"). + // inner join with environment + Join("INNER JOIN environment"). + JoinOn("installed_apps.environment_id = environment.id"). + // left join with deployment_config + Join("LEFT JOIN deployment_config"). + JoinOn("installed_apps.app_id = deployment_config.app_id"). + JoinOn("installed_apps.environment_id = deployment_config.environment_id"). + JoinOn("deployment_config.active = ?", true). + // where conditions + Where("CONCAT(app.app_name, '-', environment.environment_name) in (?)", pg.In(deploymentAppNames)). + Where("installed_apps.active = ?", true). + Where("app.active = ?", true). + Where("environment.active = ?", true). + WhereGroup(func(query *orm.Query) (*orm.Query, error) { + return query.WhereOr("installed_apps.deployment_app_type = ?", util2.PIPELINE_DEPLOYMENT_TYPE_ACD). + WhereOr("deployment_config.deployment_app_type = ?", util2.PIPELINE_DEPLOYMENT_TYPE_ACD), nil + }). + Select(&result) + return result, err +} diff --git a/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go b/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go index e666c26068..24ed4d3c5d 100644 --- a/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go +++ b/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go @@ -160,8 +160,7 @@ func (impl *AppStoreDeploymentDBServiceImpl) AppStoreDeployOperationDB(installRe return nil, err } - // Stage 2: validate deployment app type and override if ExternallyManagedDeploymentType - overrideDeploymentType, err := impl.validateAndGetOverrideDeploymentAppType(installRequest, gitOpsConfigStatus.IsGitOpsConfigured) + overrideDeploymentType, err := impl.validateAndGetOverrideDeploymentAppType(installRequest, gitOpsConfigStatus.IsGitOpsConfiguredAndArgoCdInstalled()) if err != nil { impl.logger.Errorw("error in validating deployment app type", "error", err) return nil, err @@ -178,21 +177,21 @@ func (impl *AppStoreDeploymentDBServiceImpl) AppStoreDeployOperationDB(installRe impl.logger.Errorw("GitOps request validation error", "allowCustomRepository", gitOpsConfigStatus.AllowCustomRepository, "gitOpsRepoURL", installRequest.GitOpsRepoURL, "err", validationErr) return nil, validationErr } - // This should be set before to validateCustomGitOpsRepoURL, - // as validateCustomGitOpsRepoURL will override installRequest.GitOpsRepoURL + // This should be set before to validateCustomGitOpsConfig, + // as validateCustomGitOpsConfig will override installRequest.GitOpsRepoURL if !apiGitOpsBean.IsGitOpsRepoNotConfigured(installRequest.GitOpsRepoURL) && installRequest.GitOpsRepoURL != apiGitOpsBean.GIT_REPO_DEFAULT { // If GitOps repo is configured and not configured to bean.GIT_REPO_DEFAULT installRequest.IsCustomRepository = true } // validating the git repository configured for GitOps deployments - gitOpsRepoURL, isNew, gitRepoErr := impl.validateCustomGitOpsRepoURL(gitOpsConfigStatus, installRequest) + gitOpsRepoURL, isNew, gitRepoErr := impl.validateCustomGitOpsConfig(gitOpsConfigStatus, installRequest) if gitRepoErr != nil { // Found validation err impl.logger.Errorw("validation failed for GitOps repository", "repo url", installRequest.GitOpsRepoURL, "err", gitRepoErr) return nil, gitRepoErr } - // validateCustomGitOpsRepoURL returns sanitized repo url after validation + // validateCustomGitOpsConfig returns sanitized repo url after validation 
installRequest.GitOpsRepoURL = gitOpsRepoURL installRequest.IsNewGitOpsRepo = isNew } else { @@ -698,14 +697,14 @@ func (impl *AppStoreDeploymentDBServiceImpl) validateGitOpsRequest(allowCustomRe return nil } -func (impl *AppStoreDeploymentDBServiceImpl) validateCustomGitOpsRepoURL(gitOpsConfigurationStatus *gitOpsBean.GitOpsConfigurationStatus, installAppVersionRequest *appStoreBean.InstallAppVersionDTO) (string, bool, error) { - validateCustomGitRepoURLRequest := validationBean.ValidateCustomGitRepoURLRequest{ +func (impl *AppStoreDeploymentDBServiceImpl) validateCustomGitOpsConfig(gitOpsConfigurationStatus *gitOpsBean.GitOpsConfigurationStatus, installAppVersionRequest *appStoreBean.InstallAppVersionDTO) (string, bool, error) { + validateCustomGitRepoURLRequest := validationBean.ValidateGitOpsRepoRequest{ GitRepoURL: installAppVersionRequest.GitOpsRepoURL, AppName: installAppVersionRequest.AppName, UserId: installAppVersionRequest.UserId, GitOpsProvider: gitOpsConfigurationStatus.Provider, } - gitopsRepoURL, isNew, gitRepoErr := impl.fullModeDeploymentService.ValidateCustomGitRepoURL(validateCustomGitRepoURLRequest) + gitopsRepoURL, isNew, gitRepoErr := impl.fullModeDeploymentService.ValidateCustomGitOpsConfig(validateCustomGitRepoURLRequest) if gitRepoErr != nil { // Found validation err impl.logger.Errorw("found validation error in custom GitOps repo", "repo url", installAppVersionRequest.GitOpsRepoURL, "err", gitRepoErr) diff --git a/pkg/appStore/installedApp/service/AppStoreDeploymentService.go b/pkg/appStore/installedApp/service/AppStoreDeploymentService.go index 1c0863644c..210d5739a9 100644 --- a/pkg/appStore/installedApp/service/AppStoreDeploymentService.go +++ b/pkg/appStore/installedApp/service/AppStoreDeploymentService.go @@ -592,14 +592,14 @@ func (impl *AppStoreDeploymentServiceImpl) updateInstalledApp(ctx context.Contex installedAppDeploymentAction := adapter.NewInstalledAppDeploymentAction(deploymentConfig.DeploymentAppType) // migrate installedApp.GitOpsRepoName to installedApp.GitOpsRepoUrl if util.IsAcdApp(deploymentConfig.DeploymentAppType) && - len(deploymentConfig.RepoURL) == 0 { + len(deploymentConfig.GetRepoURL()) == 0 { gitRepoUrl, err := impl.fullModeDeploymentService.GetAcdAppGitOpsRepoURL(installedApp.App.AppName, installedApp.Environment.Name) if err != nil { impl.logger.Errorw("error in GitOps repository url migration", "err", err) return nil, err } - deploymentConfig.RepoURL = gitRepoUrl - installedApp.GitOpsRepoUrl = gitRepoUrl + deploymentConfig.SetRepoURL(gitRepoUrl) + //installedApp.GitOpsRepoUrl = gitRepoUrl installedApp.GitOpsRepoName = impl.gitOpsConfigReadService.GetGitOpsRepoNameFromUrl(gitRepoUrl) } // migration ends @@ -752,9 +752,9 @@ func (impl *AppStoreDeploymentServiceImpl) updateInstalledApp(ctx context.Contex installedApp.UpdateStatus(appStoreBean.DEPLOY_SUCCESS) installedApp.UpdateAuditLog(upgradeAppRequest.UserId) if monoRepoMigrationRequired { - //if monorepo case is true then repoUrl is changed then also update repo url in database + //in the mono repo case the repoUrl has changed, so also update the repo url in the database installedApp.UpdateGitOpsRepository(gitOpsResponse.ChartGitAttribute.RepoUrl, installedApp.IsCustomRepository) - deploymentConfig.RepoURL = gitOpsResponse.ChartGitAttribute.RepoUrl + deploymentConfig.SetRepoURL(gitOpsResponse.ChartGitAttribute.RepoUrl) } installedApp, err = impl.installedAppRepository.UpdateInstalledApp(installedApp, tx) if err != nil { @@ -998,11 +998,11 @@ func (impl *AppStoreDeploymentServiceImpl)
linkHelmApplicationToChartStore(insta // checkIfMonoRepoMigrationRequired checks if gitOps repo name is changed func (impl *AppStoreDeploymentServiceImpl) checkIfMonoRepoMigrationRequired(installedApp *repository.InstalledApps, deploymentConfig *bean5.DeploymentConfig) bool { monoRepoMigrationRequired := false - if !util.IsAcdApp(deploymentConfig.DeploymentAppType) || gitOps.IsGitOpsRepoNotConfigured(deploymentConfig.RepoURL) || deploymentConfig.ConfigType == bean5.CUSTOM.String() { + if !util.IsAcdApp(deploymentConfig.DeploymentAppType) || gitOps.IsGitOpsRepoNotConfigured(deploymentConfig.GetRepoURL()) || deploymentConfig.ConfigType == bean5.CUSTOM.String() { return false } var err error - gitOpsRepoName := impl.gitOpsConfigReadService.GetGitOpsRepoNameFromUrl(deploymentConfig.RepoURL) + gitOpsRepoName := impl.gitOpsConfigReadService.GetGitOpsRepoNameFromUrl(deploymentConfig.GetRepoURL()) if len(gitOpsRepoName) == 0 { gitOpsRepoName, err = impl.fullModeDeploymentService.GetAcdAppGitOpsRepoName(installedApp.App.AppName, installedApp.Environment.Name) if err != nil || gitOpsRepoName == "" { @@ -1019,62 +1019,6 @@ func (impl *AppStoreDeploymentServiceImpl) checkIfMonoRepoMigrationRequired(inst return monoRepoMigrationRequired } -// handleGitOpsRepoUrlMigration will migrate git_ops_repo_name to git_ops_repo_url -func (impl *AppStoreDeploymentServiceImpl) handleGitOpsRepoUrlMigration(tx *pg.Tx, installedApp *repository.InstalledApps, deploymentConfig *bean5.DeploymentConfig, userId int32) error { - var ( - localTx *pg.Tx - err error - ) - - if tx == nil { - dbConnection := impl.installedAppRepository.GetConnection() - localTx, err = dbConnection.Begin() - if err != nil { - return err - } - // Rollback tx on error. - defer localTx.Rollback() - } - - gitRepoUrl, err := impl.fullModeDeploymentService.GetGitRepoUrl(installedApp.GitOpsRepoName) - if err != nil { - impl.logger.Errorw("error in GitOps repository url migration", "err", err) - return err - } - installedApp.GitOpsRepoUrl = gitRepoUrl - installedApp.UpdatedOn = time.Now() - installedApp.UpdatedBy = userId - - var dbTx *pg.Tx - if localTx != nil { - dbTx = localTx - } else { - dbTx = tx - } - - _, err = impl.installedAppRepository.UpdateInstalledApp(installedApp, dbTx) - if err != nil { - impl.logger.Errorw("error in updating installed app model", "err", err) - return err - } - - deploymentConfig.RepoURL = gitRepoUrl - deploymentConfig, err = impl.deploymentConfigService.CreateOrUpdateConfig(dbTx, deploymentConfig, userId) - if err != nil { - impl.logger.Errorw("error in updating deployment config", "err", err) - return err - } - - if localTx != nil { - err = localTx.Commit() - if err != nil { - impl.logger.Errorw("error while committing transaction to db", "error", err) - return err - } - } - return err -} - // getAppNameForInstalledApp will fetch and returns AppName from app table func (impl *AppStoreDeploymentServiceImpl) getAppNameForInstalledApp(installedAppId int) string { installedApp, err := impl.installedAppRepository.GetInstalledApp(installedAppId) diff --git a/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go b/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go index 1e3418bed3..2e95dea1f1 100644 --- a/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go +++ b/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go @@ -80,7 +80,7 @@ type InstalledAppDBServiceImpl struct { UserService user.UserService EnvironmentService environment.EnvironmentService 
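The checkIfMonoRepoMigrationRequired helper above ultimately reduces to a repo-name comparison once the guards pass (ACD app, repo configured, non-custom config). A simplified distillation of that decision, as a hypothetical helper that ignores the fallback ACD lookups the real method performs:

// monoRepoMigrationNeeded: migration is only required when both names are
// known and they diverge; with either name missing, no decision can be made
// (the real method falls back to GetAcdAppGitOpsRepoName in that case).
func monoRepoMigrationNeeded(currentRepoName, expectedRepoName string) bool {
	if currentRepoName == "" || expectedRepoName == "" {
		return false
	}
	return currentRepoName != expectedRepoName
}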
InstalledAppRepositoryHistory appStoreRepo.InstalledAppVersionHistoryRepository - deploymentConfigService common.DeploymentConfigService + DeploymentConfigService common.DeploymentConfigService } func NewInstalledAppDBServiceImpl(logger *zap.SugaredLogger, @@ -97,7 +97,7 @@ func NewInstalledAppDBServiceImpl(logger *zap.SugaredLogger, UserService: userService, EnvironmentService: environmentService, InstalledAppRepositoryHistory: installedAppRepositoryHistory, - deploymentConfigService: deploymentConfigService, + DeploymentConfigService: deploymentConfigService, } } @@ -211,7 +211,7 @@ func (impl *InstalledAppDBServiceImpl) FindAppDetailsForAppstoreApplication(inst return bean2.AppDetailContainer{}, err } - deploymentConfig, err := impl.deploymentConfigService.GetConfigForHelmApps(installedAppVerison.InstalledApp.AppId, installedAppVerison.InstalledApp.EnvironmentId) + deploymentConfig, err := impl.DeploymentConfigService.GetConfigForHelmApps(installedAppVerison.InstalledApp.AppId, installedAppVerison.InstalledApp.EnvironmentId) if err != nil { impl.Logger.Errorw("error in getiting deployment config db object by appId and envId", "appId", installedAppVerison.InstalledApp.AppId, "envId", installedAppVerison.InstalledApp.EnvironmentId, "err", err) return bean2.AppDetailContainer{}, err @@ -294,7 +294,7 @@ func (impl *InstalledAppDBServiceImpl) GetInstalledAppByClusterNamespaceAndName( if err != nil { return nil, err } - deploymentConfig, err := impl.deploymentConfigService.GetConfigForHelmApps(installedApp.AppId, installedApp.EnvironmentId) + deploymentConfig, err := impl.DeploymentConfigService.GetConfigForHelmApps(installedApp.AppId, installedApp.EnvironmentId) if err != nil { impl.Logger.Errorw("error in getiting deployment config db object by appId and envId", "appId", installedApp.AppId, "envId", installedApp.EnvironmentId, "err", err) return nil, err @@ -311,7 +311,7 @@ func (impl *InstalledAppDBServiceImpl) GetInstalledAppByInstalledAppId(installed return nil, err } installedApp := &installedAppVersion.InstalledApp - deploymentConfig, err := impl.deploymentConfigService.GetConfigForHelmApps(installedApp.AppId, installedApp.EnvironmentId) + deploymentConfig, err := impl.DeploymentConfigService.GetConfigForHelmApps(installedApp.AppId, installedApp.EnvironmentId) if err != nil { impl.Logger.Errorw("error in getiting deployment config db object by appId and envId", "appId", installedApp.AppId, "envId", installedApp.EnvironmentId, "err", err) return nil, err @@ -328,7 +328,7 @@ func (impl *InstalledAppDBServiceImpl) GetInstalledAppVersion(id int, userId int impl.Logger.Errorw("error while fetching from db", "error", err) return nil, err } - deploymentConfig, err := impl.deploymentConfigService.GetConfigForHelmApps(model.InstalledApp.AppId, model.InstalledApp.EnvironmentId) + deploymentConfig, err := impl.DeploymentConfigService.GetConfigForHelmApps(model.InstalledApp.AppId, model.InstalledApp.EnvironmentId) if err != nil { impl.Logger.Errorw("error in getiting deployment config db object by appId and envId", "appId", model.InstalledApp.AppId, "envId", model.InstalledApp.EnvironmentId, "err", err) return nil, err @@ -372,7 +372,7 @@ func (impl *InstalledAppDBServiceImpl) GetInstalledAppVersionByIdIncludeDeleted( impl.Logger.Errorw("error while fetching from db", "error", err) return nil, err } - deploymentConfig, err := impl.deploymentConfigService.GetConfigForHelmApps(model.InstalledApp.AppId, model.InstalledApp.EnvironmentId) + deploymentConfig, err := 
impl.DeploymentConfigService.GetConfigForHelmApps(model.InstalledApp.AppId, model.InstalledApp.EnvironmentId) if err != nil { impl.Logger.Errorw("error in getiting deployment config db object by appId and envId", "appId", model.InstalledApp.AppId, "envId", model.InstalledApp.EnvironmentId, "err", err) return nil, err @@ -537,5 +537,5 @@ func (impl *InstalledAppDBServiceImpl) MarkInstalledAppVersionModelInActive(inst } func (impl *InstalledAppDBServiceImpl) IsChartStoreAppManagedByArgoCd(appId int) (bool, error) { - return impl.deploymentConfigService.IsChartStoreAppManagedByArgoCd(appId) + return impl.DeploymentConfigService.IsChartStoreAppManagedByArgoCd(appId) } diff --git a/pkg/appStore/installedApp/service/EAMode/deployment/EAModeDeploymentService.go b/pkg/appStore/installedApp/service/EAMode/deployment/EAModeDeploymentService.go index c8c1fa46f7..b84da14a3d 100644 --- a/pkg/appStore/installedApp/service/EAMode/deployment/EAModeDeploymentService.go +++ b/pkg/appStore/installedApp/service/EAMode/deployment/EAModeDeploymentService.go @@ -423,10 +423,10 @@ func (impl *EAModeDeploymentServiceImpl) UpdateInstalledAppAndPipelineStatusForF // TODO: Need to refactor this,refer below reason // This is being done as in ea mode wire argocd service is being binded to helmServiceImpl due to which we are restricted to implement this here. // RefreshAndUpdateACDApp this will update chart info in acd app if required in case of mono repo migration and will refresh argo app -func (impl *EAModeDeploymentServiceImpl) UpdateAndSyncACDApps(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, ChartGitAttribute *commonBean.ChartGitAttribute, isMonoRepoMigrationRequired bool, ctx context.Context, tx *pg.Tx) error { +func (impl *EAModeDeploymentServiceImpl) UpdateAndSyncACDApps(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, chartGitAttribute *commonBean.ChartGitAttribute, isMonoRepoMigrationRequired bool, ctx context.Context, tx *pg.Tx) error { return errors.New("this is not implemented") } -func (impl *EAModeDeploymentServiceImpl) ValidateCustomGitRepoURL(request validationBean.ValidateCustomGitRepoURLRequest) (string, bool, error) { +func (impl *EAModeDeploymentServiceImpl) ValidateCustomGitOpsConfig(request validationBean.ValidateGitOpsRepoRequest) (string, bool, error) { return "", false, errors.New("this is not implemented") } diff --git a/pkg/appStore/installedApp/service/FullMode/InstalledAppDBExtendedService.go b/pkg/appStore/installedApp/service/FullMode/InstalledAppDBExtendedService.go index 79d8e11684..49a3a5c2a2 100644 --- a/pkg/appStore/installedApp/service/FullMode/InstalledAppDBExtendedService.go +++ b/pkg/appStore/installedApp/service/FullMode/InstalledAppDBExtendedService.go @@ -92,6 +92,16 @@ func (impl *InstalledAppDBExtendedServiceImpl) UpdateInstalledAppVersionStatus(a } func (impl *InstalledAppDBExtendedServiceImpl) IsGitOpsRepoAlreadyRegistered(repoUrl string) (bool, error) { + + urlPresent, err := impl.InstalledAppDBServiceImpl.DeploymentConfigService.CheckIfURLAlreadyPresent(repoUrl) + if err != nil && !util.IsErrNoRows(err) { + impl.Logger.Errorw("error in checking url in deployment configs", "repoUrl", repoUrl, "err", err) + return false, err + } + if urlPresent { + return true, nil + } + repoName := impl.gitOpsConfigReadService.GetGitOpsRepoNameFromUrl(repoUrl) installedAppModel, err := impl.InstalledAppRepository.GetInstalledAppByGitRepoUrl(repoName, repoUrl) if err != nil && !util.IsErrNoRows(err) { diff --git 
a/pkg/appStore/installedApp/service/FullMode/deployment/FullModeDeploymentService.go b/pkg/appStore/installedApp/service/FullMode/deployment/FullModeDeploymentService.go index 4f54e81799..abf065a1e3 100644 --- a/pkg/appStore/installedApp/service/FullMode/deployment/FullModeDeploymentService.go +++ b/pkg/appStore/installedApp/service/FullMode/deployment/FullModeDeploymentService.go @@ -147,7 +147,7 @@ func (impl *FullModeDeploymentServiceImpl) InstallApp(installAppVersionRequest * ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) defer cancel() //STEP 4: registerInArgo - err := impl.argoClientWrapperService.RegisterGitOpsRepoInArgoWithRetry(ctx, chartGitAttr.RepoUrl, installAppVersionRequest.UserId) + err := impl.argoClientWrapperService.RegisterGitOpsRepoInArgoWithRetry(ctx, chartGitAttr.RepoUrl, chartGitAttr.TargetRevision, installAppVersionRequest.UserId) if err != nil { impl.Logger.Errorw("error in argo registry", "err", err) return nil, err @@ -163,7 +163,7 @@ func (impl *FullModeDeploymentServiceImpl) InstallApp(installAppVersionRequest * //STEP 7: normal refresh ACD - update for step 6 to avoid delay syncTime := time.Now() - err = impl.argoClientWrapperService.SyncArgoCDApplicationIfNeededAndRefresh(ctx, installAppVersionRequest.ACDAppName) + err = impl.argoClientWrapperService.SyncArgoCDApplicationIfNeededAndRefresh(ctx, installAppVersionRequest.ACDAppName, chartGitAttr.TargetRevision) if err != nil { impl.Logger.Errorw("error in getting the argo application with normal refresh", "err", err) return nil, err diff --git a/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppArgoCdService.go b/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppArgoCdService.go index 5f2a1d6bfc..9b637360f0 100644 --- a/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppArgoCdService.go +++ b/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppArgoCdService.go @@ -44,7 +44,7 @@ type InstalledAppArgoCdService interface { // CheckIfArgoAppExists will return- isFound: if Argo app object exists; err: if any err found CheckIfArgoAppExists(acdAppName string) (isFound bool, err error) // UpdateAndSyncACDApps this will update chart info in acd app if required in case of mono repo migration and will refresh argo app - UpdateAndSyncACDApps(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, ChartGitAttribute *commonBean.ChartGitAttribute, isMonoRepoMigrationRequired bool, ctx context.Context, tx *pg.Tx) error + UpdateAndSyncACDApps(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, chartGitAttribute *commonBean.ChartGitAttribute, isMonoRepoMigrationRequired bool, ctx context.Context, tx *pg.Tx) error DeleteACD(acdAppName string, ctx context.Context, isNonCascade bool) error GetAcdAppGitOpsRepoURL(appName string, environmentName string) (string, error) } @@ -95,7 +95,7 @@ func isArgoCdGitOpsRepoUrlOutOfSync(argoApplication *v1alpha1.Application, gitOp return false } -func (impl *FullModeDeploymentServiceImpl) UpdateAndSyncACDApps(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, ChartGitAttribute *commonBean.ChartGitAttribute, isMonoRepoMigrationRequired bool, ctx context.Context, tx *pg.Tx) error { +func (impl *FullModeDeploymentServiceImpl) UpdateAndSyncACDApps(installAppVersionRequest *appStoreBean.InstallAppVersionDTO, chartGitAttribute *commonBean.ChartGitAttribute, isMonoRepoMigrationRequired bool, ctx context.Context, tx *pg.Tx) error { acdAppName := installAppVersionRequest.ACDAppName argoApplication, err := 
impl.argoClientWrapperService.GetArgoAppByName(ctx, acdAppName) if err != nil { @@ -107,7 +107,7 @@ func (impl *FullModeDeploymentServiceImpl) UpdateAndSyncACDApps(installAppVersio isArgoRepoUrlOutOfSync := isArgoCdGitOpsRepoUrlOutOfSync(argoApplication, installAppVersionRequest.GitOpsRepoURL) if isMonoRepoMigrationRequired || isArgoRepoUrlOutOfSync { // update repo details on ArgoCD as repo is changed - err := impl.UpgradeDeployment(installAppVersionRequest, ChartGitAttribute, 0, ctx) + err := impl.UpgradeDeployment(installAppVersionRequest, chartGitAttribute, 0, ctx) if err != nil { return err } @@ -119,7 +119,8 @@ func (impl *FullModeDeploymentServiceImpl) UpdateAndSyncACDApps(installAppVersio return err } syncTime := time.Now() - err = impl.argoClientWrapperService.SyncArgoCDApplicationIfNeededAndRefresh(ctx, acdAppName) + targetRevision := chartGitAttribute.TargetRevision + err = impl.argoClientWrapperService.SyncArgoCDApplicationIfNeededAndRefresh(ctx, acdAppName, targetRevision) if err != nil { impl.Logger.Errorw("error in getting argocd application with normal refresh", "err", err, "argoAppName", installAppVersionRequest.ACDAppName) clientErrCode, errMsg := util.GetClientDetailedError(err) @@ -192,7 +193,7 @@ func (impl *FullModeDeploymentServiceImpl) patchAcdApp(ctx context.Context, inst ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) defer cancel() //registerInArgo - err := impl.argoClientWrapperService.RegisterGitOpsRepoInArgoWithRetry(ctx, chartGitAttr.RepoUrl, installAppVersionRequest.UserId) + err := impl.argoClientWrapperService.RegisterGitOpsRepoInArgoWithRetry(ctx, chartGitAttr.RepoUrl, chartGitAttr.TargetRevision, installAppVersionRequest.UserId) if err != nil { impl.Logger.Errorw("error in argo registry", "err", err) return err @@ -203,7 +204,7 @@ func (impl *FullModeDeploymentServiceImpl) patchAcdApp(ctx context.Context, inst ArgoAppName: installAppVersionRequest.ACDAppName, ChartLocation: chartGitAttr.ChartLocation, GitRepoUrl: chartGitAttr.RepoUrl, - TargetRevision: "master", + TargetRevision: chartGitAttr.TargetRevision, PatchType: "merge", } err = impl.argoClientWrapperService.PatchArgoCdApp(ctx, patchReq) diff --git a/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppGitOpsService.go b/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppGitOpsService.go index 8f6e94d0cf..f23c6be157 100644 --- a/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppGitOpsService.go +++ b/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppGitOpsService.go @@ -30,7 +30,7 @@ import ( commonBean "github.com/devtron-labs/devtron/pkg/deployment/gitOps/common/bean" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/git" validationBean "github.com/devtron-labs/devtron/pkg/deployment/gitOps/validation/bean" - util2 "github.com/devtron-labs/devtron/util" + globalUtil "github.com/devtron-labs/devtron/util" "github.com/google/go-github/github" "github.com/microsoft/azure-devops-go-api/azuredevops" "github.com/xanzy/go-gitlab" @@ -53,8 +53,7 @@ type InstalledAppGitOpsService interface { // git.GitOperationService.CommitValues (If repo exists and Repo migration is not needed) // functions to perform GitOps during upgrade deployments (GitOps based Helm Apps) UpdateAppGitOpsOperations(manifest *bean.AppStoreManifestResponse, installAppVersionRequest *appStoreBean.InstallAppVersionDTO, 
monoRepoMigrationRequired bool, commitRequirements bool) (*bean.AppStoreGitOpsResponse, error) - ValidateCustomGitRepoURL(request validationBean.ValidateCustomGitRepoURLRequest) (string, bool, error) - GetGitRepoUrl(gitOpsRepoName string) (string, error) + ValidateCustomGitOpsConfig(request validationBean.ValidateGitOpsRepoRequest) (string, bool, error) CreateArgoRepoSecretIfNeeded(appStoreApplicationVersion *appStoreDiscoverRepository.AppStoreApplicationVersion) error } @@ -138,7 +137,8 @@ func (impl *FullModeDeploymentServiceImpl) UpdateAppGitOpsOperations(manifest *b noTargetFoundForRequirements, _ := impl.parseGitRepoErrorResponse(requirementsCommitErr) if noTargetFoundForRequirements || noTargetFoundForValues { //create repo again and try again - auto fix - _, _, err := impl.createGitOpsRepo(impl.gitOpsConfigReadService.GetGitOpsRepoNameFromUrl(installAppVersionRequest.GitOpsRepoURL), installAppVersionRequest.UserId) + _, _, err := impl.createGitOpsRepo(impl.gitOpsConfigReadService.GetGitOpsRepoNameFromUrl(installAppVersionRequest.GitOpsRepoURL), + installAppVersionRequest.GetTargetRevision(), installAppVersionRequest.UserId) if err != nil { impl.Logger.Errorw("error in creating GitOps repo for valuesCommitErr or requirementsCommitErr", "gitRepoUrl", installAppVersionRequest.GitOpsRepoURL) return nil, err @@ -149,7 +149,11 @@ func (impl *FullModeDeploymentServiceImpl) UpdateAppGitOpsOperations(manifest *b return nil, fmt.Errorf("error in committing values and requirements to git repository") } gitOpsResponse.GitHash = gitHash - gitOpsResponse.ChartGitAttribute = &commonBean.ChartGitAttribute{RepoUrl: installAppVersionRequest.GitOpsRepoURL, ChartLocation: installAppVersionRequest.ACDAppName} + gitOpsResponse.ChartGitAttribute = &commonBean.ChartGitAttribute{ + RepoUrl: installAppVersionRequest.GitOpsRepoURL, + TargetRevision: installAppVersionRequest.GetTargetRevision(), + ChartLocation: installAppVersionRequest.ACDAppName, + } return gitOpsResponse, nil } @@ -199,12 +203,12 @@ func (impl *FullModeDeploymentServiceImpl) createGitOpsRepoAndPushChart(installA return nil, "", fmt.Errorf("Invalid request! 
Git repository URL is not found for installed app '%s'", installAppVersionRequest.AppName) } gitOpsRepoName := impl.gitOpsConfigReadService.GetGitOpsRepoName(installAppVersionRequest.AppName) - gitopsRepoURL, isNew, err := impl.createGitOpsRepo(gitOpsRepoName, installAppVersionRequest.UserId) + gitOpsRepoURL, isNew, err := impl.createGitOpsRepo(gitOpsRepoName, installAppVersionRequest.GetTargetRevision(), installAppVersionRequest.UserId) if err != nil { impl.Logger.Errorw("Error in creating gitops repo for ", "appName", installAppVersionRequest.AppName, "err", err) return nil, "", err } - installAppVersionRequest.GitOpsRepoURL = gitopsRepoURL + installAppVersionRequest.GitOpsRepoURL = gitOpsRepoURL installAppVersionRequest.IsCustomRepository = false installAppVersionRequest.IsNewGitOpsRepo = isNew @@ -219,7 +223,7 @@ func (impl *FullModeDeploymentServiceImpl) createGitOpsRepoAndPushChart(installA } // createGitOpsRepo creates a gitOps repo with readme -func (impl *FullModeDeploymentServiceImpl) createGitOpsRepo(gitOpsRepoName string, userId int32) (string, bool, error) { +func (impl *FullModeDeploymentServiceImpl) createGitOpsRepo(gitOpsRepoName string, targetRevision string, userId int32) (string, bool, error) { bitbucketMetadata, err := impl.gitOpsConfigReadService.GetBitbucketMetadata() if err != nil { impl.Logger.Errorw("error in getting bitbucket metadata", "err", err) @@ -228,6 +232,7 @@ func (impl *FullModeDeploymentServiceImpl) createGitOpsRepo(gitOpsRepoName strin //getting user name & emailId for commit author data gitRepoRequest := &bean2.GitOpsConfigDto{ GitRepoName: gitOpsRepoName, + TargetRevision: targetRevision, Description: "helm chart for " + gitOpsRepoName, BitBucketWorkspaceId: bitbucketMetadata.BitBucketWorkspaceId, BitBucketProjectKey: bitbucketMetadata.BitBucketProjectKey, @@ -309,7 +314,7 @@ func (impl *FullModeDeploymentServiceImpl) getGitCommitConfig(installAppVersionR return nil, err } - argocdAppName := util2.BuildDeployedAppName(installAppVersionRequest.AppName, environment.Name) + argoCdAppName := globalUtil.BuildDeployedAppName(installAppVersionRequest.AppName, environment.Name) if util.IsAcdApp(installAppVersionRequest.DeploymentAppType) && len(installAppVersionRequest.GitOpsRepoURL) == 0 && installAppVersionRequest.InstalledAppId != 0 { @@ -332,7 +337,8 @@ func (impl *FullModeDeploymentServiceImpl) getGitCommitConfig(installAppVersionR return nil, apiErr } //installAppVersionRequest.GitOpsRepoURL = InstalledApp.GitOpsRepoUrl - installAppVersionRequest.GitOpsRepoURL = deploymentConfig.RepoURL + installAppVersionRequest.GitOpsRepoURL = deploymentConfig.GetRepoURL() + installAppVersionRequest.TargetRevision = deploymentConfig.GetTargetRevision() } gitOpsRepoName := impl.gitOpsConfigReadService.GetGitOpsRepoNameFromUrl(installAppVersionRequest.GitOpsRepoURL) userEmailId, userName := impl.gitOpsConfigReadService.GetUserEmailIdAndNameForGitOpsCommit(installAppVersionRequest.UserId) @@ -340,8 +346,9 @@ func (impl *FullModeDeploymentServiceImpl) getGitCommitConfig(installAppVersionR FileName: filename, FileContent: fileString, ChartName: installAppVersionRequest.AppName, - ChartLocation: argocdAppName, + ChartLocation: argoCdAppName, ChartRepoName: gitOpsRepoName, + TargetRevision: installAppVersionRequest.GetTargetRevision(), ReleaseMessage: fmt.Sprintf("release-%d-env-%d ", appStoreAppVersion.Id, environment.Id), UserEmailId: userEmailId, UserName: userName, @@ -386,12 +393,9 @@ func (impl *FullModeDeploymentServiceImpl) getValuesAndRequirementForGitConfig(i 
return valuesConfig, RequirementConfig, nil } -func (impl *FullModeDeploymentServiceImpl) ValidateCustomGitRepoURL(request validationBean.ValidateCustomGitRepoURLRequest) (string, bool, error) { - return impl.gitOpsValidationService.ValidateCustomGitRepoURL(request) -} - -func (impl *FullModeDeploymentServiceImpl) GetGitRepoUrl(gitOpsRepoName string) (string, error) { - return impl.gitOperationService.GetRepoUrlByRepoName(gitOpsRepoName) +func (impl *FullModeDeploymentServiceImpl) ValidateCustomGitOpsConfig(request validationBean.ValidateGitOpsRepoRequest) (string, bool, error) { + request.TargetRevision = globalUtil.GetDefaultTargetRevision() + return impl.gitOpsValidationService.ValidateCustomGitOpsConfig(request) } func (impl *FullModeDeploymentServiceImpl) CreateArgoRepoSecretIfNeeded(appStoreApplicationVersion *appStoreDiscoverRepository.AppStoreApplicationVersion) error { diff --git a/pkg/appStore/installedApp/service/FullMode/deploymentTypeChange/InstalledAppDeploymentTypeChangeService.go b/pkg/appStore/installedApp/service/FullMode/deploymentTypeChange/InstalledAppDeploymentTypeChangeService.go index 7e2eed87af..2d713500d6 100644 --- a/pkg/appStore/installedApp/service/FullMode/deploymentTypeChange/InstalledAppDeploymentTypeChangeService.go +++ b/pkg/appStore/installedApp/service/FullMode/deploymentTypeChange/InstalledAppDeploymentTypeChangeService.go @@ -38,6 +38,7 @@ import ( "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/FullMode/deployment" util2 "github.com/devtron-labs/devtron/pkg/appStore/util" "github.com/devtron-labs/devtron/pkg/argoApplication" + bean4 "github.com/devtron-labs/devtron/pkg/argoApplication/bean" "github.com/devtron-labs/devtron/pkg/bean" "github.com/devtron-labs/devtron/pkg/cluster" "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" @@ -269,7 +270,7 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) AnnotateCRDsIfExist(ctx query := &application.ResourcesQuery{ ApplicationName: &deploymentAppName, } - resp, err := impl.argoApplicationService.ResourceTree(ctx, query) + resp, err := impl.argoApplicationService.GetResourceTree(ctx, bean4.NewImperativeQueryRequest(query)) if err != nil { impl.logger.Errorw("error in fetching resource tree", "err", err) err = &util.ApiError{ @@ -341,7 +342,7 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) deleteInstalledApps(ctx err = impl.fullModeDeploymentService.DeleteACD(deploymentAppName, ctx, false) } else if deploymentConfig.DeploymentAppType == bean2.Helm { // For converting from Helm to ArgoCD, GitOps should be configured - if gitOpsConfigErr != nil || !gitOpsConfigStatus.IsGitOpsConfigured { + if gitOpsConfigErr != nil || !gitOpsConfigStatus.IsGitOpsConfiguredAndArgoCdInstalled() { err = &util.ApiError{HttpStatusCode: http.StatusBadRequest, Code: "200", UserMessage: errors.New("GitOps not configured or unable to fetch GitOps configuration")} } if err != nil { diff --git a/pkg/appStore/installedApp/service/FullMode/resource/ResourceTreeService.go b/pkg/appStore/installedApp/service/FullMode/resource/ResourceTreeService.go index f6bccf4ba6..4ad2845bb4 100644 --- a/pkg/appStore/installedApp/service/FullMode/resource/ResourceTreeService.go +++ b/pkg/appStore/installedApp/service/FullMode/resource/ResourceTreeService.go @@ -38,6 +38,7 @@ import ( appStoreDiscoverRepository 
"github.com/devtron-labs/devtron/pkg/appStore/discover/repository" "github.com/devtron-labs/devtron/pkg/appStore/installedApp/repository" "github.com/devtron-labs/devtron/pkg/argoApplication" + bean3 "github.com/devtron-labs/devtron/pkg/argoApplication/bean" "github.com/devtron-labs/devtron/pkg/deployment/common" bean2 "github.com/devtron-labs/devtron/pkg/deployment/common/bean" "github.com/devtron-labs/devtron/pkg/k8s" @@ -225,7 +226,7 @@ func (impl *InstalledAppResourceServiceImpl) fetchResourceTreeForACD(rctx contex } defer cancel() start := time.Now() - resp, err := impl.argoApplicationService.ResourceTree(ctx, query) + resp, err := impl.argoApplicationService.GetResourceTree(ctx, bean3.NewImperativeQueryRequest(query)) elapsed := time.Since(start) impl.logger.Debugf("Time elapsed %s in fetching app-store installed application %s for environment %s", elapsed, deploymentAppName, envId) if err != nil { diff --git a/pkg/appStore/installedApp/service/common/AppStoreDeploymentCommonService.go b/pkg/appStore/installedApp/service/common/AppStoreDeploymentCommonService.go index 51dc2d0bc1..2e0322a238 100644 --- a/pkg/appStore/installedApp/service/common/AppStoreDeploymentCommonService.go +++ b/pkg/appStore/installedApp/service/common/AppStoreDeploymentCommonService.go @@ -32,6 +32,7 @@ import ( "github.com/devtron-labs/devtron/pkg/appStore/installedApp/repository" "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/EAMode" "github.com/devtron-labs/devtron/pkg/auth/user" + clusterBean "github.com/devtron-labs/devtron/pkg/cluster/bean" "github.com/go-pg/pg" "go.opentelemetry.io/otel" "go.uber.org/zap" @@ -142,7 +143,7 @@ func (impl *AppStoreDeploymentCommonServiceImpl) GetDeploymentHistoryInfoFromDB( // as virtual environment doesn't exist on actual cluster, we will use default cluster for running helm template command if installedApp.IsVirtualEnvironment { - clusterId = appStoreBean.DEFAULT_CLUSTER_ID + clusterId = clusterBean.DefaultClusterId installedApp.Namespace = appStoreBean.DEFAULT_NAMESPACE } diff --git a/pkg/appWorkflow/AppWorkflowService.go b/pkg/appWorkflow/AppWorkflowService.go index ba700ca925..5121445a79 100644 --- a/pkg/appWorkflow/AppWorkflowService.go +++ b/pkg/appWorkflow/AppWorkflowService.go @@ -787,13 +787,11 @@ func (impl AppWorkflowServiceImpl) FindCdPipelinesByAppId(appId int) (*bean.CdPi } for _, pipeline := range dbPipelines { - envDeploymentConfig, err := impl.deploymentConfigService.GetConfigForDevtronApps(appId, pipeline.EnvironmentId) if err != nil { impl.Logger.Errorw("error in fetching environment deployment config by appId and envId", "appId", appId, "envId", pipeline.EnvironmentId, "err", err) return nil, err } - cdPipelineConfigObj := &bean.CDPipelineConfigObject{ Id: pipeline.Id, EnvironmentId: pipeline.EnvironmentId, @@ -802,9 +800,10 @@ func (impl AppWorkflowServiceImpl) FindCdPipelinesByAppId(appId int) (*bean.CdPi TriggerType: pipeline.TriggerType, Name: pipeline.Name, DeploymentAppType: envDeploymentConfig.DeploymentAppType, + ReleaseMode: envDeploymentConfig.ReleaseMode, AppName: pipeline.DeploymentAppName, AppId: pipeline.AppId, - IsGitOpsRepoNotConfigured: !isAppLevelGitOpsConfigured, + IsGitOpsRepoNotConfigured: !envDeploymentConfig.IsPipelineGitOpsRepoConfigured(isAppLevelGitOpsConfigured), } cdPipelines.Pipelines = 
append(cdPipelines.Pipelines, cdPipelineConfigObj) } diff --git a/pkg/argoApplication/ArgoApplicationService.go b/pkg/argoApplication/ArgoApplicationService.go index 6688d2f02f..48cd31c56d 100644 --- a/pkg/argoApplication/ArgoApplicationService.go +++ b/pkg/argoApplication/ArgoApplicationService.go @@ -18,7 +18,6 @@ package argoApplication import ( "context" - application2 "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" "github.com/devtron-labs/common-lib/utils/k8s" k8sCommonBean "github.com/devtron-labs/common-lib/utils/k8s/commonBean" "github.com/devtron-labs/devtron/api/helm-app/gRPC" @@ -31,9 +30,12 @@ import ( "github.com/devtron-labs/devtron/pkg/argoApplication/read/config" "github.com/devtron-labs/devtron/pkg/cluster/adapter" clusterRepository "github.com/devtron-labs/devtron/pkg/cluster/repository" + "github.com/devtron-labs/devtron/pkg/deployment/common" + commonBean "github.com/devtron-labs/devtron/pkg/deployment/common/bean" "github.com/devtron-labs/devtron/pkg/k8s/application" k8s2 "github.com/devtron-labs/devtron/pkg/k8s/bean" "github.com/devtron-labs/devtron/util" + "github.com/devtron-labs/devtron/util/sliceUtil" "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/errors" "net/http" @@ -46,7 +48,7 @@ type ArgoApplicationService interface { //FUll mode // ResourceTree returns the status for all Apps deployed via ArgoCd - ResourceTree(ctx context.Context, query *application2.ResourcesQuery) (*argoApplication.ResourceTreeResponse, error) + GetResourceTree(ctx context.Context, acdQueryRequest *bean.AcdClientQueryRequest) (*argoApplication.ResourceTreeResponse, error) } type ArgoApplicationServiceImpl struct { @@ -57,6 +59,7 @@ type ArgoApplicationServiceImpl struct { helmAppService service.HelmAppService k8sApplicationService application.K8sApplicationService argoApplicationConfigService config.ArgoApplicationConfigService + deploymentConfigService common.DeploymentConfigService } func NewArgoApplicationServiceImpl(logger *zap.SugaredLogger, @@ -65,7 +68,8 @@ func NewArgoApplicationServiceImpl(logger *zap.SugaredLogger, helmAppClient gRPC.HelmAppClient, helmAppService service.HelmAppService, k8sApplicationService application.K8sApplicationService, - argoApplicationConfigService config.ArgoApplicationConfigService) *ArgoApplicationServiceImpl { + argoApplicationConfigService config.ArgoApplicationConfigService, + deploymentConfigService common.DeploymentConfigService) *ArgoApplicationServiceImpl { return &ArgoApplicationServiceImpl{ logger: logger, clusterRepository: clusterRepository, @@ -74,6 +78,7 @@ func NewArgoApplicationServiceImpl(logger *zap.SugaredLogger, helmAppClient: helmAppClient, k8sApplicationService: k8sApplicationService, argoApplicationConfigService: argoApplicationConfigService, + deploymentConfigService: deploymentConfigService, } } @@ -133,7 +138,24 @@ func (impl *ArgoApplicationServiceImpl) ListApplications(clusterIds []int) ([]*b appLists := getApplicationListDtos(resp, clusterObj.ClusterName, clusterObj.Id) appListFinal = append(appListFinal, appLists...) 
} - return appListFinal, nil + applicationNames := sliceUtil.NewSliceFromFuncExec(appListFinal, func(app *bean.ArgoApplicationListDto) string { + return app.Name + }) + allDevtronManagedArgoAppsInfo, err := impl.deploymentConfigService.GetAllArgoAppInfosByDeploymentAppNames(applicationNames) + if err != nil { + impl.logger.Errorw("error in getting all argo app names by cluster", "err", err, "applicationNames", applicationNames) + return nil, err + } + filteredAppList := make([]*bean.ArgoApplicationListDto, 0) + filteredAppList = sliceUtil.Filter(filteredAppList, appListFinal, func(app *bean.ArgoApplicationListDto) bool { + _, found := sliceUtil.Find(allDevtronManagedArgoAppsInfo, func(info *commonBean.DevtronArgoCdAppInfo) bool { + return info.ArgoAppClusterId == app.ClusterId && + info.ArgoAppNamespace == app.Namespace && + info.ArgoCdAppName == app.Name + }) + return !found + }) + return filteredAppList, nil } func getApplicationListDtos(resp *k8s.ClusterResourceListMap, clusterName string, clusterId int) []*bean.ArgoApplicationListDto { @@ -212,6 +234,6 @@ func (impl *ArgoApplicationServiceImpl) UnHibernateArgoApplication(ctx context.C return response, nil } -func (impl *ArgoApplicationServiceImpl) ResourceTree(ctx context.Context, query *application2.ResourcesQuery) (*argoApplication.ResourceTreeResponse, error) { +func (impl *ArgoApplicationServiceImpl) GetResourceTree(ctx context.Context, acdQueryRequest *bean.AcdClientQueryRequest) (*argoApplication.ResourceTreeResponse, error) { return nil, util2.DefaultApiError().WithHttpStatusCode(http.StatusNotFound).WithInternalMessage(util.NotSupportedErr).WithUserMessage(util.NotSupportedErr) } diff --git a/pkg/argoApplication/ArgoApplicationServiceExtended.go b/pkg/argoApplication/ArgoApplicationServiceExtended.go index fe2ca85067..8f69b2f74a 100644 --- a/pkg/argoApplication/ArgoApplicationServiceExtended.go +++ b/pkg/argoApplication/ArgoApplicationServiceExtended.go @@ -1,3 +1,19 @@ +/* + * Copyright (c) 2020-2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package argoApplication import ( @@ -7,126 +23,230 @@ import ( application2 "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" "github.com/devtron-labs/common-lib/utils/k8s" - "github.com/devtron-labs/devtron/api/helm-app/gRPC" openapi "github.com/devtron-labs/devtron/api/helm-app/openapiClient" - "github.com/devtron-labs/devtron/api/helm-app/service" "github.com/devtron-labs/devtron/client/argocdServer" argoApplication "github.com/devtron-labs/devtron/client/argocdServer/bean" + util2 "github.com/devtron-labs/devtron/internal/util" + "github.com/devtron-labs/devtron/pkg/app/appDetails/adapter" "github.com/devtron-labs/devtron/pkg/argoApplication/bean" - "github.com/devtron-labs/devtron/pkg/argoApplication/read/config" - clusterRepository "github.com/devtron-labs/devtron/pkg/cluster/repository" - "github.com/devtron-labs/devtron/pkg/k8s/application" + "github.com/devtron-labs/devtron/pkg/argoApplication/read" + "github.com/devtron-labs/devtron/pkg/cluster" + "github.com/devtron-labs/devtron/pkg/cluster/environment" + util5 "github.com/devtron-labs/devtron/pkg/util" "github.com/devtron-labs/devtron/util" - "go.uber.org/zap" + "google.golang.org/grpc" v12 "k8s.io/api/apps/v1" + "net/http" "strings" "time" ) type ArgoApplicationServiceExtendedImpl struct { *ArgoApplicationServiceImpl - acdClientWrapper argocdServer.ArgoClientWrapperService + environmentService environment.EnvironmentService + aCDAuthConfig *util5.ACDAuthConfig + argoApplicationReadService read.ArgoApplicationReadService + clusterService cluster.ClusterService + acdClientWrapper argocdServer.ArgoClientWrapperService } -func NewArgoApplicationServiceExtendedServiceImpl(logger *zap.SugaredLogger, - clusterRepository clusterRepository.ClusterRepository, - k8sUtil *k8s.K8sServiceImpl, - helmAppClient gRPC.HelmAppClient, - helmAppService service.HelmAppService, - k8sApplicationService application.K8sApplicationService, - argoApplicationConfigService config.ArgoApplicationConfigService, +func NewArgoApplicationServiceExtendedServiceImpl(argoApplicationServiceImpl *ArgoApplicationServiceImpl, acdClientWrapper argocdServer.ArgoClientWrapperService) *ArgoApplicationServiceExtendedImpl { return &ArgoApplicationServiceExtendedImpl{ - ArgoApplicationServiceImpl: &ArgoApplicationServiceImpl{ - logger: logger, - clusterRepository: clusterRepository, - k8sUtil: k8sUtil, - helmAppService: helmAppService, - helmAppClient: helmAppClient, - k8sApplicationService: k8sApplicationService, - argoApplicationConfigService: argoApplicationConfigService, - }, - acdClientWrapper: acdClientWrapper, + ArgoApplicationServiceImpl: argoApplicationServiceImpl, + acdClientWrapper: acdClientWrapper, } } func (c *ArgoApplicationServiceExtendedImpl) ListApplications(clusterIds []int) ([]*bean.ArgoApplicationListDto, error) { return c.ArgoApplicationServiceImpl.ListApplications(clusterIds) } + func (c *ArgoApplicationServiceExtendedImpl) HibernateArgoApplication(ctx context.Context, app *bean.ArgoAppIdentifier, hibernateRequest *openapi.HibernateRequest) ([]*openapi.HibernateStatus, error) { return c.ArgoApplicationServiceImpl.HibernateArgoApplication(ctx, app, 
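One design note on the ListApplications filtering added earlier in this diff: sliceUtil.Filter runs a linear sliceUtil.Find per application, which is O(n*m). A map keyed on (clusterId, namespace, name) would do the same job in O(n+m). This is only a sketch of the alternative, not what the PR ships:

type argoAppKey struct {
	clusterId int
	namespace string
	name      string
}
// index the Devtron-managed argo apps once
managed := make(map[argoAppKey]struct{}, len(allDevtronManagedArgoAppsInfo))
for _, info := range allDevtronManagedArgoAppsInfo {
	managed[argoAppKey{info.ArgoAppClusterId, info.ArgoAppNamespace, info.ArgoCdAppName}] = struct{}{}
}
// then keep only the externally managed applications
filteredAppList := make([]*bean.ArgoApplicationListDto, 0, len(appListFinal))
for _, app := range appListFinal {
	if _, ok := managed[argoAppKey{app.ClusterId, app.Namespace, app.Name}]; !ok {
		filteredAppList = append(filteredAppList, app)
	}
}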
hibernateRequest) } + func (c *ArgoApplicationServiceExtendedImpl) UnHibernateArgoApplication(ctx context.Context, app *bean.ArgoAppIdentifier, hibernateRequest *openapi.HibernateRequest) ([]*openapi.HibernateStatus, error) { return c.ArgoApplicationServiceImpl.UnHibernateArgoApplication(ctx, app, hibernateRequest) } -func (impl *ArgoApplicationServiceExtendedImpl) ResourceTree(ctxt context.Context, query *application2.ResourcesQuery) (*argoApplication.ResourceTreeResponse, error) { - //all the apps deployed via argo are fetching status from here - ctx, cancel := context.WithTimeout(ctxt, argoApplication.TimeoutSlow) - defer cancel() +func (c *ArgoApplicationServiceExtendedImpl) updateArgoAppStatusMetaDataInResourceTree(application *v1alpha1.Application, + resourceTreeResponse *argoApplication.ResourceTreeResponse) *argoApplication.ResourceTreeResponse { + conditions := make([]v1alpha1.ApplicationCondition, 0) + resourcesSyncResultMap := make(map[string]string) + status := "Unknown" + hash := "" + if application != nil { + // https://github.com/argoproj/argo-cd/issues/11234 workaround + resourceTreeResponse.ApplicationTree = updateNodeHealthStatus(resourceTreeResponse.ApplicationTree, application) + argoApplicationStatus := application.Status + status = string(argoApplicationStatus.Health.Status) + hash = argoApplicationStatus.Sync.Revision + conditions = argoApplicationStatus.Conditions + for _, condition := range conditions { + if condition.Type != v1alpha1.ApplicationConditionSharedResourceWarning { + status = "Degraded" + } + } + if argoApplicationStatus.OperationState != nil && argoApplicationStatus.OperationState.SyncResult != nil { + resourcesSyncResults := argoApplicationStatus.OperationState.SyncResult.Resources + for _, resourcesSyncResult := range resourcesSyncResults { + if resourcesSyncResult == nil { + continue + } + resourceIdentifier := fmt.Sprintf("%s/%s", resourcesSyncResult.Kind, resourcesSyncResult.Name) + resourcesSyncResultMap[resourceIdentifier] = resourcesSyncResult.Message + } + } + if status == "" { + status = "Unknown" + } + } + resourceTreeResponse.Conditions = conditions + resourceTreeResponse.ResourcesSyncResultMap = resourcesSyncResultMap + resourceTreeResponse.Status = status + resourceTreeResponse.RevisionHash = hash + return resourceTreeResponse +} - asc, conn, err := impl.acdClientWrapper.GetArgoClient(ctxt) +func (c *ArgoApplicationServiceExtendedImpl) getApplicationObjectWithK8sClient(ctx context.Context, + acdQueryRequest *bean.AcdClientQueryRequest) (*v1alpha1.Application, error) { + var appNamespace, applicationName string + if acdQueryRequest.Query.AppNamespace != nil { + appNamespace = *acdQueryRequest.Query.AppNamespace + } + if acdQueryRequest.Query.ApplicationName != nil { + applicationName = *acdQueryRequest.Query.ApplicationName + } + application, err := c.acdClientWrapper.GetArgoAppByNameWithK8sClient(ctx, acdQueryRequest.ArgoClusterId, appNamespace, applicationName) if err != nil { - impl.logger.Errorw("Error in GetArgoClient", "err", err) + c.logger.Errorw("error in fetching application", "acdQueryRequest", acdQueryRequest, "err", err) return nil, err } - defer util.Close(conn, impl.logger) - impl.logger.Debugw("GRPC_GET_RESOURCETREE", "req", query) - resp, err := asc.ResourceTree(ctx, query) + return application, nil +} + +func (c *ArgoApplicationServiceExtendedImpl) getApplicationObjectWithAcdClient(ctx context.Context, + asc application2.ApplicationServiceClient, acdQueryRequest *bean.AcdClientQueryRequest) 
(*v1alpha1.Application, error) { + appQuery := application2.ApplicationQuery{Name: acdQueryRequest.Query.ApplicationName, AppNamespace: acdQueryRequest.Query.AppNamespace} + // TODO Asutosh: why is asc.Get(ctx, &appQuery) not used here? + app, err := asc.Watch(ctx, &appQuery) + if app != nil { + appResp, argoErr := app.Recv() + if argoErr == nil { + return &appResp.Application, nil + } + // TODO Asutosh: why is argoErr not handled? + } + return nil, err +} + +func (c *ArgoApplicationServiceExtendedImpl) getApplicationObject(ctx context.Context, + asc application2.ApplicationServiceClient, acdQueryRequest *bean.AcdClientQueryRequest) (*v1alpha1.Application, error) { + if acdQueryRequest.Mode.IsDeclarative() { + argoApplicationSpec, err := c.getApplicationObjectWithK8sClient(ctx, acdQueryRequest) + if err != nil { + c.logger.Errorw("error in fetching application", "acdQueryRequest", acdQueryRequest, "err", err) + return nil, err + } + return argoApplicationSpec, nil + } else { + return c.getApplicationObjectWithAcdClient(ctx, asc, acdQueryRequest) + } +} + +func (c *ArgoApplicationServiceExtendedImpl) GetResourceTree(ctx context.Context, acdQueryRequest *bean.AcdClientQueryRequest) (*argoApplication.ResourceTreeResponse, error) { + if acdQueryRequest == nil || acdQueryRequest.Query == nil { + return nil, util2.NewApiError(http.StatusInternalServerError, "something went wrong!", "invalid argo application query request") + } + var ( + asc application2.ApplicationServiceClient + conn *grpc.ClientConn + err error + ) + if !acdQueryRequest.Mode.IsDeclarative() { + asc, conn, err = c.acdClientWrapper.GetArgoClient(ctx) + if err != nil { + c.logger.Errorw("Error in GetArgoClient", "err", err) + return nil, err + } + defer util.Close(conn, c.logger) + } + + appTree, podMetadata, err := c.getArgoResourceTreeAndPodMetadata(ctx, asc, acdQueryRequest) if err != nil { - impl.logger.Errorw("GRPC_GET_RESOURCETREE", "req", query, "err", err) + c.logger.Errorw("error in getting resource tree using cache", "acdQueryRequest", acdQueryRequest, "err", err) return nil, err } - responses := impl.parseResult(resp, query, ctx, asc, err) - podMetadata, newReplicaSets := impl.buildPodMetadata(resp, responses) + applicationObj, err := c.getApplicationObject(ctx, asc, acdQueryRequest) + // TODO Asutosh: why is the error not handled here?
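+ // observation: the err from getApplicationObject is not short-circuited; it is returned below together with the response, so callers still receive the resource tree along with any application-fetch error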
+ resourceTreeResponse := &argoApplication.ResourceTreeResponse{ + ApplicationTree: appTree, + PodMetadata: podMetadata, + } + return c.updateArgoAppStatusMetaDataInResourceTree(applicationObj, resourceTreeResponse), err +} - appQuery := application2.ApplicationQuery{Name: query.ApplicationName} - app, err := asc.Watch(ctxt, &appQuery) - var conditions = make([]v1alpha1.ApplicationCondition, 0) - resourcesSyncResultMap := make(map[string]string) - status := "Unknown" - hash := "" - if app != nil { - appResp, err := app.Recv() - if err == nil { - // https://github.com/argoproj/argo-cd/issues/11234 workaround - updateNodeHealthStatus(resp, appResp) - argoApplicationStatus := appResp.Application.Status - status = string(argoApplicationStatus.Health.Status) - hash = argoApplicationStatus.Sync.Revision - conditions = argoApplicationStatus.Conditions - for _, condition := range conditions { - if condition.Type != v1alpha1.ApplicationConditionSharedResourceWarning { - status = "Degraded" - } - } - if argoApplicationStatus.OperationState != nil && argoApplicationStatus.OperationState.SyncResult != nil { - resourcesSyncResults := argoApplicationStatus.OperationState.SyncResult.Resources - for _, resourcesSyncResult := range resourcesSyncResults { - if resourcesSyncResult == nil { - continue - } - resourceIdentifier := fmt.Sprintf("%s/%s", resourcesSyncResult.Kind, resourcesSyncResult.Name) - resourcesSyncResultMap[resourceIdentifier] = resourcesSyncResult.Message - } - } - if status == "" { - status = "Unknown" - } +func (c *ArgoApplicationServiceExtendedImpl) getArgoResourceTreeAndPodMetadata(ctx context.Context, asc application2.ApplicationServiceClient, acdQueryRequest *bean.AcdClientQueryRequest) (*v1alpha1.ApplicationTree, []*argoApplication.PodMetadata, error) { + if acdQueryRequest.Mode.IsDeclarative() { + argoResourceTree, podMetadataList, err := c.getResourceTreeUsingK8sClient(ctx, acdQueryRequest) + if err != nil { + c.logger.Errorw("Error in getArgoResourceTreeAndPodMetadata, calling fallback function to get from argo", "acdQueryRequest", acdQueryRequest, "err", err) + } else { + return argoResourceTree, podMetadataList, err } } - return &argoApplication.ResourceTreeResponse{ - ApplicationTree: resp, - Status: status, - RevisionHash: hash, - PodMetadata: podMetadata, - Conditions: conditions, - NewGenerationReplicaSets: newReplicaSets, - ResourcesSyncResultMap: resourcesSyncResultMap, - }, err + c.logger.Debugw("GRPC_GET_RESOURCETREE", "req", acdQueryRequest) + // fallback: fetch the resource tree via the argo-cd gRPC client + return c.getResourceTreeUsingArgoClient(ctx, asc, acdQueryRequest) } -func (impl *ArgoApplicationServiceImpl) parseResult(resp *v1alpha1.ApplicationTree, query *application2.ResourcesQuery, ctx context.Context, asc application2.ApplicationServiceClient, err error) []*argoApplication.Result { +func (c *ArgoApplicationServiceExtendedImpl) getResourceTreeUsingArgoClient(ctx context.Context, asc application2.ApplicationServiceClient, acdQueryRequest *bean.AcdClientQueryRequest) (*v1alpha1.ApplicationTree, []*argoApplication.PodMetadata, error) { + // all the apps deployed via argo fetch their status from here + argoCtx, cancel := context.WithTimeout(ctx, argoApplication.TimeoutSlow) + defer cancel() + rtResp, err := asc.ResourceTree(argoCtx, acdQueryRequest.Query) + if err != nil { + c.logger.Errorw("Error in getting resource tree", "err", err) + return nil, nil, err + } + responses := c.parseResult(rtResp, acdQueryRequest.Query, ctx, asc, err) + podMetadata := c.buildPodMetadata(rtResp, responses)
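+ // parseResult fans out concurrent gRPC GetResource calls for the pods' parent workloads; buildPodMetadata then derives per-pod metadata from the fetched manifests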
+ return rtResp, podMetadata, nil +} + +func (c *ArgoApplicationServiceExtendedImpl) getResourceTreeUsingK8sClient(ctx context.Context, acdQueryRequest *bean.AcdClientQueryRequest) (*v1alpha1.ApplicationTree, []*argoApplication.PodMetadata, error) { + clusterConfig, err := c.clusterService.GetClusterConfigByClusterId(acdQueryRequest.ArgoClusterId) + if err != nil { + c.logger.Errorw("Error in getting cluster config by clusterId", "acdQueryRequest", acdQueryRequest, "err", err) + return nil, nil, err + } + return c.getAcdResourceTreeUsingK8sClient(ctx, clusterConfig, acdQueryRequest) +} + +func (c *ArgoApplicationServiceExtendedImpl) getAcdResourceTreeUsingK8sClient(ctx context.Context, clusterConfig *k8s.ClusterConfig, acdQueryRequest *bean.AcdClientQueryRequest) (*v1alpha1.ApplicationTree, []*argoApplication.PodMetadata, error) { + argoCdAppNamespace := acdQueryRequest.GetAppNamespace(c.aCDAuthConfig.ACDConfigMapNamespace) + argoManagedResourceResp, err := c.argoApplicationReadService.GetArgoManagedResources(acdQueryRequest.GetApplicationName(), argoCdAppNamespace, clusterConfig) + if err != nil { + c.logger.Errorw("Error in getting argo managed resources", "acdQueryRequest", acdQueryRequest, "err", err) + return nil, nil, err + } + resourceTreeResp, err := c.argoApplicationReadService.GetArgoAppResourceTree(clusterConfig, acdQueryRequest.TargetClusterId, argoManagedResourceResp) + if err != nil { + c.logger.Errorw("Error in getting resource tree for argo app using cache", "acdQueryRequest", acdQueryRequest, "err", err) + return nil, nil, err + } else { + argoResourceTree, err := adapter.GetArgoApplicationTreeForNodes(resourceTreeResp.GetNodes()) + if err != nil { + c.logger.Errorw("Error in building argo app details", "acdQueryRequest", acdQueryRequest, "err", err) + return nil, nil, err + } + podMetadataList := adapter.GetArgoPodMetadata(resourceTreeResp.PodMetadata) + return argoResourceTree, podMetadataList, nil + } +} + +func (c *ArgoApplicationServiceExtendedImpl) parseResult(resp *v1alpha1.ApplicationTree, query *application2.ResourcesQuery, ctx context.Context, asc application2.ApplicationServiceClient, err error) []*argoApplication.Result { var responses = make([]*argoApplication.Result, 0) qCount := 0 response := make(chan argoApplication.Result) @@ -162,7 +282,7 @@ func (impl *ArgoApplicationServiceImpl) parseResult(resp *v1alpha1.ApplicationTr } } - impl.logger.Debugw("needPods", "pods", needPods) + c.logger.Debugw("needPods", "pods", needPods) for _, node := range podParents { queryNodes = append(queryNodes, node) @@ -208,9 +328,9 @@ func (impl *ArgoApplicationServiceImpl) parseResult(resp *v1alpha1.ApplicationTr startTime := time.Now() res, err := asc.GetResource(ctx, &request) if err != nil { - impl.logger.Errorw("GRPC_GET_RESOURCE", "data", request, "timeTaken", time.Since(startTime), "err", err) + c.logger.Errorw("GRPC_GET_RESOURCE", "data", request, "timeTaken", time.Since(startTime), "err", err) } else { - impl.logger.Debugw("GRPC_GET_RESOURCE", "data", request, "timeTaken", time.Since(startTime)) + c.logger.Debugw("GRPC_GET_RESOURCE", "data", request, "timeTaken", time.Since(startTime)) } if res != nil || err != nil { response <- argoApplication.Result{Response: res, Error: err, Request: &request} @@ -242,7 +362,7 @@ func (impl *ArgoApplicationServiceImpl) parseResult(resp *v1alpha1.ApplicationTr return responses } -func (impl *ArgoApplicationServiceImpl) buildPodMetadata(resp *v1alpha1.ApplicationTree, responses []*argoApplication.Result) (podMetaData
[]*argoApplication.PodMetadata, newReplicaSets []string) { +func (c *ArgoApplicationServiceExtendedImpl) buildPodMetadata(resp *v1alpha1.ApplicationTree, responses []*argoApplication.Result) (podMetaData []*argoApplication.PodMetadata) { rolloutManifests := make([]map[string]interface{}, 0) statefulSetManifest := make(map[string]interface{}) deploymentManifests := make([]map[string]interface{}, 0) @@ -251,6 +371,7 @@ func (impl *ArgoApplicationServiceImpl) buildPodMetadata(resp *v1alpha1.Applicat podManifests := make([]map[string]interface{}, 0) controllerRevisionManifests := make([]map[string]interface{}, 0) jobsManifest := make(map[string]interface{}) + newReplicaSets := make([]string, 0) var parentWorkflow []string for _, response := range responses { if response != nil && response.Response != nil { @@ -260,7 +381,7 @@ func (impl *ArgoApplicationServiceImpl) buildPodMetadata(resp *v1alpha1.Applicat manifest := make(map[string]interface{}) err := json.Unmarshal([]byte(manifestFromResponse), &manifest) if err != nil { - impl.logger.Error(err) + c.logger.Error(err) } else { rolloutManifests = append(rolloutManifests, manifest) } @@ -268,45 +389,45 @@ func (impl *ArgoApplicationServiceImpl) buildPodMetadata(resp *v1alpha1.Applicat manifest := make(map[string]interface{}) err := json.Unmarshal([]byte(manifestFromResponse), &manifest) if err != nil { - impl.logger.Error(err) + c.logger.Error(err) } else { deploymentManifests = append(deploymentManifests, manifest) } } else if kind == "StatefulSet" { err := json.Unmarshal([]byte(manifestFromResponse), &statefulSetManifest) if err != nil { - impl.logger.Error(err) + c.logger.Error(err) } } else if kind == "DaemonSet" { err := json.Unmarshal([]byte(manifestFromResponse), &daemonSetManifest) if err != nil { - impl.logger.Error(err) + c.logger.Error(err) } } else if kind == "ReplicaSet" { manifest := make(map[string]interface{}) err := json.Unmarshal([]byte(manifestFromResponse), &manifest) if err != nil { - impl.logger.Error(err) + c.logger.Error(err) } replicaSetManifests = append(replicaSetManifests, manifest) } else if kind == "Pod" { manifest := make(map[string]interface{}) err := json.Unmarshal([]byte(manifestFromResponse), &manifest) if err != nil { - impl.logger.Error(err) + c.logger.Error(err) } podManifests = append(podManifests, manifest) } else if kind == "ControllerRevision" { manifest := make(map[string]interface{}) err := json.Unmarshal([]byte(manifestFromResponse), &manifest) if err != nil { - impl.logger.Error(err) + c.logger.Error(err) } controllerRevisionManifests = append(controllerRevisionManifests, manifest) } else if kind == "Job" { err := json.Unmarshal([]byte(manifestFromResponse), &jobsManifest) if err != nil { - impl.logger.Error(err) + c.logger.Error(err) } } } @@ -788,16 +909,16 @@ func updateMetadataOfDuplicatePods(podsMetadataFromPods []*argoApplication.PodMe } // fill the health status in node from app resources -func updateNodeHealthStatus(resp *v1alpha1.ApplicationTree, appResp *v1alpha1.ApplicationWatchEvent) { - if resp == nil || len(resp.Nodes) == 0 || appResp == nil || len(appResp.Application.Status.Resources) == 0 { - return +func updateNodeHealthStatus(resp *v1alpha1.ApplicationTree, application *v1alpha1.Application) *v1alpha1.ApplicationTree { + if resp == nil || len(resp.Nodes) == 0 || application == nil || len(application.Status.Resources) == 0 { + return resp } for index, node := range resp.Nodes { if node.Health != nil { continue } - for _, resource := range appResp.Application.Status.Resources { + for 
_, resource := range application.Status.Resources { if node.Group != resource.Group || node.Version != resource.Version || node.Kind != resource.Kind || node.Name != resource.Name || node.Namespace != resource.Namespace { continue @@ -815,4 +936,5 @@ func updateNodeHealthStatus(resp *v1alpha1.ApplicationTree, appResp *v1alpha1.Ap break } } + return resp } diff --git a/pkg/argoApplication/bean/bean.go b/pkg/argoApplication/bean/bean.go index 899eff0f6c..87f606d34b 100644 --- a/pkg/argoApplication/bean/bean.go +++ b/pkg/argoApplication/bean/bean.go @@ -17,6 +17,8 @@ package bean import ( + application2 "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" + "github.com/devtron-labs/common-lib/utils/k8s" k8sCommonBean "github.com/devtron-labs/common-lib/utils/k8s/commonBean" "github.com/devtron-labs/devtron/api/helm-app/gRPC" "k8s.io/apimachinery/pkg/runtime/schema" @@ -63,6 +65,14 @@ type ArgoApplicationDetailDto struct { Manifest map[string]interface{} `json:"manifest"` } +type ArgoManagedResourceResponse struct { + ManifestResponse *k8s.ManifestResponse + HealthStatus string + SyncStatus string + DestinationServer string + ArgoManagedResources []*ArgoManagedResource +} + type ArgoManagedResource struct { Group string Kind string @@ -86,3 +96,59 @@ type ArgoAppIdentifier struct { Namespace string `json:"namespace"` AppName string `json:"appName"` } + +type AcdClientQueryRequest struct { + Mode ClientMode + Query *application2.ResourcesQuery + ArgoClusterId int + TargetClusterId int +} + +func (a *AcdClientQueryRequest) GetApplicationName() string { + if a.Query.ApplicationName == nil { + return "" + } + return *a.Query.ApplicationName +} + +func (a *AcdClientQueryRequest) GetAppNamespace(defaultAppNs string) string { + if a.Query.AppNamespace == nil { + return defaultAppNs + } + return *a.Query.AppNamespace +} + +func (a *AcdClientQueryRequest) WithTargetClusterId(clusterId int) *AcdClientQueryRequest { + a.TargetClusterId = clusterId + return a +} + +func (a *AcdClientQueryRequest) WithArgoClusterId(clusterId int) *AcdClientQueryRequest { + a.ArgoClusterId = clusterId + return a +} + +func NewDeclarativeQueryRequest(query *application2.ResourcesQuery) *AcdClientQueryRequest { + return &AcdClientQueryRequest{ + Mode: DeclarativeClient, + Query: query, + } +} + +func NewImperativeQueryRequest(query *application2.ResourcesQuery) *AcdClientQueryRequest { + return &AcdClientQueryRequest{ + Mode: ImperativeClient, + Query: query, + } +} + +type ClientMode string + +func (c ClientMode) IsDeclarative() bool { + return c == DeclarativeClient +} + +const ( + ImperativeClient ClientMode = "imperative" + DeclarativeClient ClientMode = "declarative" +) diff --git a/pkg/argoApplication/read/ArgoApplicationReadService.go b/pkg/argoApplication/read/ArgoApplicationReadService.go index faf9195639..b0f30ccb8f 100644 --- a/pkg/argoApplication/read/ArgoApplicationReadService.go +++ b/pkg/argoApplication/read/ArgoApplicationReadService.go @@ -1,3 +1,19 @@ +/* + * Copyright (c) 2020-2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package read import ( @@ -21,7 +37,9 @@ import ( type ArgoApplicationReadService interface { ValidateArgoResourceRequest(ctx context.Context, appIdentifier *bean.ArgoAppIdentifier, request *k8s.K8sRequestBean) (bool, error) - GetAppDetail(resourceName, resourceNamespace string, clusterId int) (*bean.ArgoApplicationDetailDto, error) + GetAppDetailEA(ctx context.Context, resourceName, resourceNamespace string, clusterId int) (*bean.ArgoApplicationDetailDto, error) + GetArgoManagedResources(resourceName, resourceNamespace string, clusterConfig *k8s.ClusterConfig) (*bean.ArgoManagedResourceResponse, error) + GetArgoAppResourceTree(clusterConfig *k8s.ClusterConfig, targetClusterId int, resp *bean.ArgoManagedResourceResponse) (*gRPC.ResourceTreeResponse, error) } type ArgoApplicationReadServiceImpl struct { @@ -47,7 +65,7 @@ func NewArgoApplicationReadServiceImpl(logger *zap.SugaredLogger, } -func (impl *ArgoApplicationReadServiceImpl) GetAppDetail(resourceName, resourceNamespace string, clusterId int) (*bean.ArgoApplicationDetailDto, error) { +func (impl *ArgoApplicationReadServiceImpl) GetAppDetailEA(ctx context.Context, resourceName, resourceNamespace string, clusterId int) (*bean.ArgoApplicationDetailDto, error) { appDetail := &bean.ArgoApplicationDetailDto{ ArgoApplicationListDto: &bean.ArgoApplicationListDto{ Name: resourceName, @@ -78,33 +96,50 @@ func (impl *ArgoApplicationReadServiceImpl) GetAppDetail(resourceName, resourceN } clusterBean := adapter.GetClusterBean(clusterWithApplicationObject) clusterConfig := clusterBean.GetClusterConfig() - restConfig, err := impl.k8sUtil.GetRestConfigByCluster(clusterConfig) + resp, err := impl.GetArgoManagedResources(resourceName, resourceNamespace, clusterConfig) if err != nil { - impl.logger.Errorw("error in getting rest config by cluster Id", "err", err, "clusterId", clusterWithApplicationObject.Id) + impl.logger.Errorw("error in getting argo managed resources", "err", err) return nil, err } - resp, err := impl.k8sUtil.GetResource(context.Background(), resourceNamespace, resourceName, bean.GvkForArgoApplication, restConfig) + targetClusterId := 0 + if len(resp.DestinationServer) != 0 { + if resp.DestinationServer == k8sCommonBean.DefaultClusterUrl { + targetClusterId = clusterWithApplicationObject.Id + } else if clusterIdFromMap, ok := clusterServerUrlIdMap[resp.DestinationServer]; ok { + targetClusterId = clusterIdFromMap + } + } + resourceTree, err := impl.GetArgoAppResourceTree(clusterConfig, targetClusterId, resp) if err != nil { - impl.logger.Errorw("error in getting resource list", "err", err) + impl.logger.Errorw("error in getting argo app resource tree", "err", err) return nil, err } - var destinationServer string - var argoManagedResources []*bean.ArgoManagedResource - if resp != nil && resp.Manifest.Object != nil { - appDetail.Manifest = resp.Manifest.Object - appDetail.HealthStatus, appDetail.SyncStatus, destinationServer, argoManagedResources = - helper.GetHealthSyncStatusDestinationServerAndManagedResourcesForArgoK8sRawObject(resp.Manifest.Object) + appDetail.ResourceTree = resourceTree + if 
resp.ManifestResponse != nil { + appDetail.Manifest = resp.ManifestResponse.Manifest.Object } - appDeployedOnClusterId := 0 - if destinationServer == k8sCommonBean.DefaultClusterUrl { - appDeployedOnClusterId = clusterWithApplicationObject.Id - } else if clusterIdFromMap, ok := clusterServerUrlIdMap[destinationServer]; ok { - appDeployedOnClusterId = clusterIdFromMap + appDetail.HealthStatus = resp.HealthStatus + appDetail.SyncStatus = resp.SyncStatus + return appDetail, nil +} + +func (impl *ArgoApplicationReadServiceImpl) GetArgoAppResourceTree(clusterConfig *k8s.ClusterConfig, targetClusterId int, resp *bean.ArgoManagedResourceResponse) (*gRPC.ResourceTreeResponse, error) { + if resp.ManifestResponse == nil || resp.ManifestResponse.Manifest.Object == nil { + return nil, fmt.Errorf("invalid argo managed resource response: application manifest not found") } - var configOfClusterWhereAppIsDeployed bean.ArgoClusterConfigObj - if appDeployedOnClusterId < 1 { + var targetClusterConfig bean.ArgoClusterConfigObj + if targetClusterId < 1 { + restConfig, err := impl.k8sUtil.GetRestConfigByCluster(clusterConfig) + if err != nil { + impl.logger.Errorw("error in getting rest config for cluster", "err", err, "clusterHostUrl", clusterConfig.Host) + return nil, err + } // cluster is not added on devtron, need to get server config from secret which argo-cd saved coreV1Client, err := impl.k8sUtil.GetCoreV1ClientByRestConfig(restConfig) + if err != nil { + impl.logger.Errorw("error in getting core v1 client", "err", err, "clusterHostUrl", clusterConfig.Host) + return nil, err + } secrets, err := coreV1Client.Secrets(bean.AllNamespaces).List(context.Background(), v1.ListOptions{ LabelSelector: labels.SelectorFromSet(labels.Set{"argocd.argoproj.io/secret-type": "cluster"}).String(), }) @@ -115,9 +150,9 @@ func (impl *ArgoApplicationReadServiceImpl) GetAppDetail(resourceName, resourceN for _, secret := range secrets.Items { if secret.Data != nil { if val, ok := secret.Data["server"]; ok { - if string(val) == destinationServer { + if string(val) == resp.DestinationServer { if config, ok := secret.Data["config"]; ok { - err = json.Unmarshal(config, &configOfClusterWhereAppIsDeployed) + err = json.Unmarshal(config, &targetClusterConfig) if err != nil { impl.logger.Errorw("error in unmarshaling", "err", err) return nil, err @@ -129,17 +164,43 @@ } } } - resourceTreeResp, err := impl.getResourceTreeForExternalCluster(appDeployedOnClusterId, destinationServer, configOfClusterWhereAppIsDeployed, argoManagedResources) + resourceTreeResp, err := impl.getResourceTreeForExternalCluster(targetClusterId, targetClusterConfig, resp.DestinationServer, resp.ArgoManagedResources) if err != nil { impl.logger.Errorw("error in getting resource tree response", "err", err) return nil, err } - appDetail.ResourceTree = resourceTreeResp - return appDetail, nil + return resourceTreeResp, nil } -func (impl *ArgoApplicationReadServiceImpl) getResourceTreeForExternalCluster(clusterId int, destinationServer string, - configOfClusterWhereAppIsDeployed bean.ArgoClusterConfigObj, argoManagedResources []*bean.ArgoManagedResource) (*gRPC.ResourceTreeResponse, error) { +// GetArgoManagedResources reads the argo Application object from the cluster and derives health/sync status, destination server and managed resource references from its raw manifest +func (impl *ArgoApplicationReadServiceImpl) GetArgoManagedResources(resourceName, resourceNamespace string, clusterConfig *k8s.ClusterConfig) (*bean.ArgoManagedResourceResponse, error) { + restConfig, err := impl.k8sUtil.GetRestConfigByCluster(clusterConfig) + if err != nil { + impl.logger.Errorw("error in getting rest config by cluster Id", "err", err, "clusterId", clusterConfig.ClusterId) + return nil, err + } + resp, err := impl.k8sUtil.GetResource(context.Background(), resourceNamespace, resourceName, bean.GvkForArgoApplication, restConfig) + if err != nil { + impl.logger.Errorw("error in getting argo application resource", "err", err) + return nil, err + } + impl.logger.Infow("manifest response for external argo application", "resp", resp) + + if resp != nil && resp.Manifest.Object != nil { + healthStatus, syncStatus, destinationServer, argoManagedResources := + helper.GetHealthSyncStatusDestinationServerAndManagedResourcesForArgoK8sRawObject(resp.Manifest.Object) + return &bean.ArgoManagedResourceResponse{ + ManifestResponse: resp, + HealthStatus: healthStatus, + SyncStatus: syncStatus, + DestinationServer: destinationServer, + ArgoManagedResources: argoManagedResources, + }, nil + } else { + return &bean.ArgoManagedResourceResponse{}, nil + } +} + +func (impl *ArgoApplicationReadServiceImpl) getResourceTreeForExternalCluster(clusterId int, configOfClusterWhereAppIsDeployed bean.ArgoClusterConfigObj, destinationServer string, argoManagedResources []*bean.ArgoManagedResource) (*gRPC.ResourceTreeResponse, error) { var resources []*gRPC.ExternalResourceDetail for _, argoManagedResource := range argoManagedResources { resources = append(resources, &gRPC.ExternalResourceDetail{ @@ -170,7 +231,7 @@ func (impl *ArgoApplicationReadServiceImpl) getResourceTreeForExternalCluster(cl } func (impl *ArgoApplicationReadServiceImpl) ValidateArgoResourceRequest(ctx context.Context, appIdentifier *bean.ArgoAppIdentifier, request *k8s.K8sRequestBean) (bool, error) { - app, err := impl.GetAppDetail(appIdentifier.AppName, appIdentifier.Namespace, appIdentifier.ClusterId) + app, err := impl.GetAppDetailEA(ctx, appIdentifier.AppName, appIdentifier.Namespace, appIdentifier.ClusterId) if err != nil { impl.logger.Errorw("error in getting app detail", "err", err, "appDetails", appIdentifier) apiError := clientErrors.ConvertToApiError(err) @@ -200,39 +261,40 @@ func (impl *ArgoApplicationReadServiceImpl) ValidateArgoResourceRequest(ctx cont appDetail := &gRPC.AppDetail{ ResourceTreeResponse: app.ResourceTree, } - return validateContainerNameIfReqd(valid, request, appDetail), nil + if !valid { + valid = validateContainerName(request, appDetail) + } + return valid, nil } -func validateContainerNameIfReqd(valid bool, request *k8s.K8sRequestBean, app *gRPC.AppDetail) bool { - if !valid { - requestContainerName := request.PodLogsRequest.ContainerName - podName := request.ResourceIdentifier.Name - for _, pod := range app.ResourceTreeResponse.PodMetadata { - if pod.Name == podName { - - // finding the container name in main Containers - for _, container := range pod.Containers { - if container == requestContainerName { - return true - } - } +func validateContainerName(request *k8s.K8sRequestBean, app *gRPC.AppDetail) bool { + requestContainerName := request.PodLogsRequest.ContainerName + podName := request.ResourceIdentifier.Name + for _, pod := range app.ResourceTreeResponse.PodMetadata { + if pod.Name == podName { - // finding the container name in init containers - for _, initContainer := range pod.InitContainers { - if initContainer == requestContainerName { - return true - } + // finding the container name in main Containers + for _, container := range pod.Containers { + if container == requestContainerName { + return true } + } - // finding the container name in ephemeral containers - for _, ephemeralContainer := range pod.EphemeralContainers { - if
ephemeralContainer.Name == requestContainerName { - return true - } + // finding the container name in init containers + for _, initContainer := range pod.InitContainers { + if initContainer == requestContainerName { + return true } + } + // finding the container name in ephemeral containers + for _, ephemeralContainer := range pod.EphemeralContainers { + if ephemeralContainer.Name == requestContainerName { + return true + } } + } } - return valid + return false } diff --git a/pkg/bean/adapter/adapter.go b/pkg/bean/adapter/adapter.go new file mode 100644 index 0000000000..883997aafe --- /dev/null +++ b/pkg/bean/adapter/adapter.go @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2020-2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package adapter + +import ( + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + bean3 "github.com/devtron-labs/devtron/pkg/bean" +) + +func NewCDPipelineMinConfigFromModel(pipeline *pipelineConfig.Pipeline) *bean3.CDPipelineMinConfig { + deploymentConfigMin := &bean3.CDPipelineMinConfig{ + Id: pipeline.Id, + Name: pipeline.Name, + CiPipelineId: pipeline.CiPipelineId, + EnvironmentId: pipeline.EnvironmentId, + AppId: pipeline.AppId, + DeploymentAppDeleteRequest: pipeline.DeploymentAppDeleteRequest, + DeploymentAppCreated: pipeline.DeploymentAppCreated, + DeploymentAppType: pipeline.DeploymentAppType, + + // pipeline.App is not of pointer type + AppName: pipeline.App.AppName, + TeamId: pipeline.App.TeamId, + + // pipeline.Environment is not of pointer type + EnvironmentName: pipeline.Environment.Name, + EnvironmentIdentifier: pipeline.Environment.EnvironmentIdentifier, + Namespace: pipeline.Environment.Namespace, + IsProdEnv: pipeline.Environment.Default, + } + return deploymentConfigMin +} diff --git a/pkg/bean/app.go b/pkg/bean/app.go index b4dd9f7f22..b9f9d5acd5 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -26,6 +26,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/helper" repository2 "github.com/devtron-labs/devtron/internal/sql/repository/imageTagging" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/bean/common" CiPipeline2 "github.com/devtron-labs/devtron/pkg/build/pipeline/bean" "github.com/devtron-labs/devtron/pkg/chartRepo/repository" @@ -648,7 +649,38 @@ type CDPipelineConfigObject struct { ChildPipelineId int `json:"childPipelineId"` IsDigestEnforcedForPipeline bool `json:"isDigestEnforcedForPipeline"` IsDigestEnforcedForEnv bool `json:"isDigestEnforcedForEnv"` - ReleaseMode string `json:"releaseMode" validate:"oneof=create"` + ApplicationObjectClusterId int `json:"applicationObjectClusterId"` //ACDAppClusterId + ApplicationObjectNamespace string `json:"applicationObjectNamespace"` //ACDAppNamespace + 
DeploymentAppName string `json:"deploymentAppName"` + ReleaseMode string `json:"releaseMode" validate:"omitempty,oneof=link create"` +} + +func (cdPipelineConfig *CDPipelineConfigObject) IsLinkedRelease() bool { + return cdPipelineConfig.GetReleaseMode() == util.PIPELINE_RELEASE_MODE_LINK +} + +func (cdPipelineConfig *CDPipelineConfigObject) GetReleaseMode() string { + if cdPipelineConfig == nil || len(cdPipelineConfig.ReleaseMode) == 0 { + return util.PIPELINE_RELEASE_MODE_CREATE + } + return cdPipelineConfig.ReleaseMode +} + +type CDPipelineMinConfig struct { + Id int + Name string + CiPipelineId int + EnvironmentId int + EnvironmentName string + EnvironmentIdentifier string + Namespace string + IsProdEnv bool + AppId int + AppName string + TeamId int + DeploymentAppDeleteRequest bool + DeploymentAppCreated bool + DeploymentAppType string } type CDPipelineAddType string @@ -667,6 +699,11 @@ func (cdPipelineConfig *CDPipelineConfigObject) PatchSourceInfo() (int, string) return cdPipelineConfig.SwitchFromCiPipelineId, appWorkflow.CIPIPELINE } +func (cdPipelineConfig *CDPipelineConfigObject) IsExternalArgoAppLinkRequest() bool { + return cdPipelineConfig.DeploymentAppType == util.PIPELINE_DEPLOYMENT_TYPE_ACD && + cdPipelineConfig.GetReleaseMode() == util.PIPELINE_RELEASE_MODE_LINK +} + type PreStageConfigMapSecretNames struct { ConfigMaps []string `json:"configMaps"` Secrets []string `json:"secrets"` diff --git a/pkg/chart/ChartService.go b/pkg/chart/ChartService.go index 74db49eefb..006bba7795 100644 --- a/pkg/chart/ChartService.go +++ b/pkg/chart/ChartService.go @@ -26,9 +26,12 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/app" "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" "github.com/devtron-labs/devtron/internal/util" + bean3 "github.com/devtron-labs/devtron/pkg/chart/bean" + read2 "github.com/devtron-labs/devtron/pkg/chart/read" chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" "github.com/devtron-labs/devtron/pkg/deployment/common" + adapter2 "github.com/devtron-labs/devtron/pkg/deployment/common/adapter" bean2 "github.com/devtron-labs/devtron/pkg/deployment/common/bean" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics" @@ -54,13 +57,12 @@ import ( ) type ChartService interface { - Create(templateRequest TemplateRequest, ctx context.Context) (chart *TemplateRequest, err error) - CreateChartFromEnvOverride(templateRequest TemplateRequest, ctx context.Context) (chart *TemplateRequest, err error) - FindLatestChartForAppByAppId(appId int) (chartTemplate *TemplateRequest, err error) - GetByAppIdAndChartRefId(appId int, chartRefId int) (chartTemplate *TemplateRequest, err error) - UpdateAppOverride(ctx context.Context, templateRequest *TemplateRequest) (*TemplateRequest, error) + Create(templateRequest bean3.TemplateRequest, ctx context.Context) (chart *bean3.TemplateRequest, err error) + CreateChartFromEnvOverride(templateRequest bean3.TemplateRequest, ctx context.Context) (chart *bean3.TemplateRequest, err error) + GetByAppIdAndChartRefId(appId int, chartRefId int) (chartTemplate *bean3.TemplateRequest, err error) + UpdateAppOverride(ctx context.Context, 
templateRequest *bean3.TemplateRequest) (*bean3.TemplateRequest, error) IsReadyToTrigger(appId int, envId int, pipelineId int) (IsReady, error) - FindPreviousChartByAppId(appId int) (chartTemplate *TemplateRequest, err error) + FindPreviousChartByAppId(appId int) (chartTemplate *bean3.TemplateRequest, err error) UpgradeForApp(appId int, chartRefId int, newAppOverride map[string]interface{}, userId int32, ctx context.Context) (bool, error) CheckIfChartRefUserUploadedByAppId(id int) (bool, error) PatchEnvOverrides(values json.RawMessage, oldChartType string, newChartType string) (json.RawMessage, error) @@ -70,7 +72,6 @@ type ChartService interface { ConfigureGitOpsRepoUrlForApp(appId int, repoUrl, chartLocation string, isCustomRepo bool, userId int32) (*bean2.DeploymentConfig, error) IsGitOpsRepoConfiguredForDevtronApp(appId int) (bool, error) - IsGitOpsRepoConfiguredForDevtronApps(appIds []int) (map[int]bool, error) IsGitOpsRepoAlreadyRegistered(gitOpsRepoUrl string) (bool, error) } @@ -91,6 +92,7 @@ type ChartServiceImpl struct { gitOpsConfigReadService config.GitOpsConfigReadService deploymentConfigService common.DeploymentConfigService envConfigOverrideReadService read.EnvConfigOverrideService + chartReadService read2.ChartReadService } func NewChartServiceImpl(chartRepository chartRepoRepository.ChartRepository, @@ -108,7 +110,8 @@ func NewChartServiceImpl(chartRepository chartRepoRepository.ChartRepository, chartRefService chartRef.ChartRefService, gitOpsConfigReadService config.GitOpsConfigReadService, deploymentConfigService common.DeploymentConfigService, - envConfigOverrideReadService read.EnvConfigOverrideService) *ChartServiceImpl { + envConfigOverrideReadService read.EnvConfigOverrideService, + chartReadService read2.ChartReadService) *ChartServiceImpl { return &ChartServiceImpl{ chartRepository: chartRepository, logger: logger, @@ -126,6 +129,7 @@ func NewChartServiceImpl(chartRepository chartRepoRepository.ChartRepository, gitOpsConfigReadService: gitOpsConfigReadService, deploymentConfigService: deploymentConfigService, envConfigOverrideReadService: envConfigOverrideReadService, + chartReadService: chartReadService, } } @@ -133,7 +137,7 @@ func (impl *ChartServiceImpl) PatchEnvOverrides(values json.RawMessage, oldChart return PatchWinterSoldierConfig(values, newChartType) } -func (impl *ChartServiceImpl) Create(templateRequest TemplateRequest, ctx context.Context) (*TemplateRequest, error) { +func (impl *ChartServiceImpl) Create(templateRequest bean3.TemplateRequest, ctx context.Context) (*bean3.TemplateRequest, error) { err := impl.chartRefService.CheckChartExists(templateRequest.ChartRefId) if err != nil { impl.logger.Errorw("error in getting missing chart for chartRefId", "err", err, "chartRefId") @@ -266,10 +270,10 @@ func (impl *ChartServiceImpl) Create(templateRequest TemplateRequest, ctx contex ChartRepo: charRepository.Name, ChartRepoUrl: charRepository.Url, ChartVersion: chartMeta.Version, + GitRepoUrl: gitRepoUrl, + ChartLocation: chartLocation, Status: models.CHARTSTATUS_NEW, Active: true, - ChartLocation: chartLocation, - GitRepoUrl: gitRepoUrl, ReferenceTemplate: templateName, ChartRefId: templateRequest.ChartRefId, Latest: true, @@ -288,9 +292,20 @@ func (impl *ChartServiceImpl) Create(templateRequest TemplateRequest, ctx contex deploymentConfig := &bean2.DeploymentConfig{ AppId: templateRequest.AppId, - ConfigType: common.GetDeploymentConfigType(templateRequest.IsCustomGitRepository), + ConfigType: 
adapter2.GetDeploymentConfigType(templateRequest.IsCustomGitRepository), RepoURL: gitRepoUrl, - Active: true, + ReleaseConfiguration: &bean2.ReleaseConfiguration{ + Version: bean2.Version, + ArgoCDSpec: bean2.ArgoCDSpec{ + Spec: bean2.ApplicationSpec{ + Source: &bean2.ApplicationSource{ + RepoURL: gitRepoUrl, + Path: chartLocation, + }, + }, + }, + }, + Active: true, } deploymentConfig, err = impl.deploymentConfigService.CreateOrUpdateConfig(nil, deploymentConfig, templateRequest.UserId) if err != nil { @@ -298,6 +313,12 @@ return nil, err } + err = impl.UpdateChartLocationForEnvironmentConfigs(templateRequest.AppId, chart.ChartRefId, templateRequest.UserId, version) + if err != nil { + impl.logger.Errorw("error in updating chart location in env overrides", "appId", templateRequest.AppId, "err", err) + return nil, err + } + //creating history entry for deployment template err = impl.deploymentTemplateHistoryService.CreateDeploymentTemplateHistoryFromGlobalTemplate(chart, nil, templateRequest.IsAppMetricsEnabled) if err != nil { @@ -327,7 +348,27 @@ return chartVal, err } -func (impl *ChartServiceImpl) CreateChartFromEnvOverride(templateRequest TemplateRequest, ctx context.Context) (*TemplateRequest, error) { +func (impl *ChartServiceImpl) UpdateChartLocationForEnvironmentConfigs(appId, chartRefId int, userId int32, version string) error { + envOverrides, err := impl.envConfigOverrideReadService.GetAllOverridesForApp(appId) + if err != nil { + impl.logger.Errorw("error in getting all overrides for app", "appId", appId, "err", err) + return err + } + uniqueEnvMap := make(map[int]bool) + for _, override := range envOverrides { + if _, ok := uniqueEnvMap[override.TargetEnvironment]; !ok && !override.IsOverride { + uniqueEnvMap[override.TargetEnvironment] = true + err := impl.deploymentConfigService.UpdateChartLocationInDeploymentConfig(appId, override.TargetEnvironment, chartRefId, userId, version) + if err != nil { + impl.logger.Errorw("error in updating chart location for env level deployment configs", "appId", appId, "envId", override.TargetEnvironment, "err", err) + return err + } + } + } + return nil +} + +func (impl *ChartServiceImpl) CreateChartFromEnvOverride(templateRequest bean3.TemplateRequest, ctx context.Context) (*bean3.TemplateRequest, error) { err := impl.chartRefService.CheckChartExists(templateRequest.ChartRefId) if err != nil { impl.logger.Errorw("error in getting missing chart for chartRefId", "err", err, "chartRefId") @@ -374,18 +415,24 @@ if err != nil && pg.ErrNoRows != err { return nil, err } + + deploymentConfig, err := impl.deploymentConfigService.GetConfigForDevtronApps(templateRequest.AppId, 0) + if err != nil { + impl.logger.Errorw("error in getting deployment config by appId", "appId", templateRequest.AppId, "err", err) + return nil, err + } + chartLocation := filepath.Join(templateName, version) gitRepoUrl := apiGitOpsBean.GIT_REPO_NOT_CONFIGURED - if currentLatestChart.Id > 0 && currentLatestChart.GitRepoUrl != "" { - gitRepoUrl = currentLatestChart.GitRepoUrl + if currentLatestChart.Id > 0 && deploymentConfig.GetRepoURL() != "" { + gitRepoUrl = deploymentConfig.GetRepoURL() } - deploymentConfig := &bean2.DeploymentConfig{ - AppId: templateRequest.AppId, - ConfigType: common.GetDeploymentConfigType(templateRequest.IsCustomGitRepository), - RepoURL: gitRepoUrl, - Active:
true, - } + // maintained for backward compatibility; + // adding git repo url to both deprecated and new state + deploymentConfig = deploymentConfig.SetRepoURL(gitRepoUrl) + deploymentConfig.SetChartLocation(chartLocation) + deploymentConfig, err = impl.deploymentConfigService.CreateOrUpdateConfig(nil, deploymentConfig, templateRequest.UserId) if err != nil { impl.logger.Errorw("error in saving deployment config", "appId", templateRequest.AppId, "err", err) @@ -459,15 +506,16 @@ func (impl *ChartServiceImpl) CreateChartFromEnvOverride(templateRequest Templat } // converts db object to bean -func (impl *ChartServiceImpl) chartAdaptor(chart *chartRepoRepository.Chart, isAppMetricsEnabled bool, deploymentConfig *bean2.DeploymentConfig) (*TemplateRequest, error) { +func (impl *ChartServiceImpl) chartAdaptor(chart *chartRepoRepository.Chart, isAppMetricsEnabled bool, deploymentConfig *bean2.DeploymentConfig) (*bean3.TemplateRequest, error) { if chart == nil || chart.Id == 0 { - return &TemplateRequest{}, &util.ApiError{UserMessage: "no chart found"} + return &bean3.TemplateRequest{}, &util.ApiError{UserMessage: "no chart found"} } - gitRepoUrl := "" - if !apiGitOpsBean.IsGitOpsRepoNotConfigured(deploymentConfig.RepoURL) { - gitRepoUrl = deploymentConfig.RepoURL + var gitRepoUrl, targetRevision string + if !apiGitOpsBean.IsGitOpsRepoNotConfigured(deploymentConfig.GetRepoURL()) { + gitRepoUrl = deploymentConfig.GetRepoURL() + targetRevision = deploymentConfig.GetTargetRevision() } - templateRequest := &TemplateRequest{ + templateRequest := &bean3.TemplateRequest{ RefChartTemplate: chart.ReferenceTemplate, Id: chart.Id, AppId: chart.AppId, @@ -480,6 +528,7 @@ func (impl *ChartServiceImpl) chartAdaptor(chart *chartRepoRepository.Chart, isA IsBasicViewLocked: chart.IsBasicViewLocked, CurrentViewEditor: chart.CurrentViewEditor, GitRepoUrl: gitRepoUrl, + TargetRevision: targetRevision, IsCustomGitRepository: deploymentConfig.ConfigType == bean2.CUSTOM.String(), ImageDescriptorTemplate: chart.ImageDescriptorTemplate, } @@ -489,7 +538,7 @@ func (impl *ChartServiceImpl) chartAdaptor(chart *chartRepoRepository.Chart, isA return templateRequest, nil } -func (impl *ChartServiceImpl) getChartMetaData(templateRequest TemplateRequest) (*chart.Metadata, error) { +func (impl *ChartServiceImpl) getChartMetaData(templateRequest bean3.TemplateRequest) (*chart.Metadata, error) { pg, err := impl.pipelineGroupRepository.FindById(templateRequest.AppId) if err != nil { impl.logger.Errorw("error in fetching pg", "id", templateRequest.AppId, "err", err) @@ -500,7 +549,7 @@ func (impl *ChartServiceImpl) getChartMetaData(templateRequest TemplateRequest) return metadata, err } -func (impl *ChartServiceImpl) getChartRepo(templateRequest TemplateRequest) (*chartRepoRepository.ChartRepo, error) { +func (impl *ChartServiceImpl) getChartRepo(templateRequest bean3.TemplateRequest) (*chartRepoRepository.ChartRepo, error) { if templateRequest.ChartRepositoryId == 0 { chartRepo, err := impl.repoRepository.GetDefault() if err != nil { @@ -553,12 +602,12 @@ func (impl *ChartServiceImpl) IsGitOpsRepoConfiguredForDevtronApp(appId int) (bo if err != nil { impl.logger.Errorw("error in fetching latest chart for app by appId") return false, err - } else if !gitOpsConfigStatus.IsGitOpsConfigured { + } else if !gitOpsConfigStatus.IsGitOpsConfiguredAndArgoCdInstalled() { return false, nil } else if !gitOpsConfigStatus.AllowCustomRepository { return true, nil } - latestChartConfiguredInApp, err := impl.FindLatestChartForAppByAppId(appId) + 
latestChartConfiguredInApp, err := impl.chartReadService.FindLatestChartForAppByAppId(appId) if err != nil { impl.logger.Errorw("error in fetching latest chart for app by appId") return false, err @@ -566,53 +615,7 @@ func (impl *ChartServiceImpl) IsGitOpsRepoConfiguredForDevtronApp(appId int) (bo return !apiGitOpsBean.IsGitOpsRepoNotConfigured(latestChartConfiguredInApp.GitRepoUrl), nil } -func (impl *ChartServiceImpl) IsGitOpsRepoConfiguredForDevtronApps(appIds []int) (map[int]bool, error) { - gitOpsConfigStatus, err := impl.gitOpsConfigReadService.IsGitOpsConfigured() - if err != nil { - impl.logger.Errorw("error in fetching latest chart for app by appId") - return nil, err - } - appIdRepoConfiguredMap := make(map[int]bool, len(appIds)) - for _, appId := range appIds { - if !gitOpsConfigStatus.IsGitOpsConfigured { - appIdRepoConfiguredMap[appId] = false - } else if !gitOpsConfigStatus.AllowCustomRepository { - appIdRepoConfiguredMap[appId] = true - } else { - latestChartConfiguredInApp, err := impl.FindLatestChartForAppByAppId(appId) - if err != nil { - impl.logger.Errorw("error in fetching latest chart for app by appId") - return nil, err - } - appIdRepoConfiguredMap[appId] = !apiGitOpsBean.IsGitOpsRepoNotConfigured(latestChartConfiguredInApp.GitRepoUrl) - } - } - return appIdRepoConfiguredMap, nil -} - -func (impl *ChartServiceImpl) FindLatestChartForAppByAppId(appId int) (chartTemplate *TemplateRequest, err error) { - chart, err := impl.chartRepository.FindLatestChartForAppByAppId(appId) - if err != nil { - impl.logger.Errorw("error in fetching chart ", "appId", appId, "err", err) - return nil, err - } - - deploymentConfig, err := impl.deploymentConfigService.GetConfigForDevtronApps(appId, 0) - if err != nil { - impl.logger.Errorw("error in fetching deployment config by appId", "appId", appId, "err", err) - return nil, err - } - - isAppMetricsEnabled, err := impl.deployedAppMetricsService.GetMetricsFlagByAppId(appId) - if err != nil { - impl.logger.Errorw("error in fetching app-metrics", "appId", appId, "err", err) - return nil, err - } - chartTemplate, err = impl.chartAdaptor(chart, isAppMetricsEnabled, deploymentConfig) - return chartTemplate, err -} - -func (impl *ChartServiceImpl) GetByAppIdAndChartRefId(appId int, chartRefId int) (chartTemplate *TemplateRequest, err error) { +func (impl *ChartServiceImpl) GetByAppIdAndChartRefId(appId int, chartRefId int) (chartTemplate *bean3.TemplateRequest, err error) { chart, err := impl.chartRepository.FindChartByAppIdAndRefId(appId, chartRefId) if err != nil { impl.logger.Errorw("error in fetching chart ", "appId", appId, "err", err) @@ -632,7 +635,7 @@ func (impl *ChartServiceImpl) GetByAppIdAndChartRefId(appId int, chartRefId int) return chartTemplate, err } -func (impl *ChartServiceImpl) UpdateAppOverride(ctx context.Context, templateRequest *TemplateRequest) (*TemplateRequest, error) { +func (impl *ChartServiceImpl) UpdateAppOverride(ctx context.Context, templateRequest *bean3.TemplateRequest) (*bean3.TemplateRequest, error) { _, span := otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindById") template, err := impl.chartRepository.FindById(templateRequest.Id) @@ -654,6 +657,13 @@ func (impl *ChartServiceImpl) UpdateAppOverride(ctx context.Context, templateReq if err != nil { return nil, err } + + chartRef, err := impl.chartRefService.FindById(template.ChartRefId) + if err != nil { + impl.logger.Errorw("error in finding chart ref by id", "chartRefId", template.ChartRefId, "err", err) + return nil, err + } + if 
currentLatestChart.Id > 0 && currentLatestChart.Id == templateRequest.Id { } else if currentLatestChart.Id != templateRequest.Id { @@ -709,6 +719,12 @@ func (impl *ChartServiceImpl) UpdateAppOverride(ctx context.Context, templateReq return nil, err } + err = impl.UpdateChartLocationForEnvironmentConfigs(templateRequest.AppId, templateRequest.ChartRefId, templateRequest.UserId, template.ChartVersion) + if err != nil { + impl.logger.Errorw("error in updating chart location in env overrides", "appId", templateRequest.AppId, "err", err) + return nil, err + } + } else { return nil, nil } @@ -734,11 +750,29 @@ return nil, err } + config, err := impl.deploymentConfigService.GetConfigForDevtronApps(template.AppId, 0) + if err != nil { + impl.logger.Errorw("error in fetching config", "appId", template.AppId, "err", err) + return nil, err + } + + chartGitLocation := filepath.Join(chartRef.Location, template.ChartVersion) deploymentConfig := &bean2.DeploymentConfig{ AppId: template.AppId, - ConfigType: common.GetDeploymentConfigType(template.IsCustomGitRepository), - RepoURL: template.GitRepoUrl, - Active: true, + ConfigType: adapter2.GetDeploymentConfigType(template.IsCustomGitRepository), + RepoURL: config.GetRepoURL(), + ReleaseConfiguration: &bean2.ReleaseConfiguration{ + Version: bean2.Version, + ArgoCDSpec: bean2.ArgoCDSpec{ + Spec: bean2.ApplicationSpec{ + Source: &bean2.ApplicationSource{ + RepoURL: config.GetRepoURL(), + Path: chartGitLocation, + }, + }, + }, + }, + Active: true, } deploymentConfig, err = impl.deploymentConfigService.CreateOrUpdateConfig(nil, deploymentConfig, templateRequest.UserId) @@ -866,7 +900,7 @@ return chartRefResponse, nil } -func (impl *ChartServiceImpl) FindPreviousChartByAppId(appId int) (chartTemplate *TemplateRequest, err error) { +func (impl *ChartServiceImpl) FindPreviousChartByAppId(appId int) (chartTemplate *bean3.TemplateRequest, err error) { chart, err := impl.chartRepository.FindPreviousChartByAppId(appId) if err != nil { impl.logger.Errorw("error in fetching chart ", "appId", appId, "err", err) @@ -883,7 +917,7 @@ func (impl *ChartServiceImpl) FindPreviousChartByAppId(appId int) (chartTemplate func (impl *ChartServiceImpl) UpgradeForApp(appId int, chartRefId int, newAppOverride map[string]interface{}, userId int32, ctx context.Context) (bool, error) { - currentChart, err := impl.FindLatestChartForAppByAppId(appId) + currentChart, err := impl.chartReadService.FindLatestChartForAppByAppId(appId) if err != nil && pg.ErrNoRows != err { impl.logger.Error(err) return false, err @@ -893,7 +927,7 @@ return false, fmt.Errorf("no chart configured for this app, skip it for upgrade") } - templateRequest := TemplateRequest{} + templateRequest := bean3.TemplateRequest{} templateRequest.ChartRefId = chartRefId templateRequest.AppId = appId templateRequest.ChartRepositoryId = currentChart.ChartRepositoryId @@ -985,8 +1019,7 @@ func (impl *ChartServiceImpl) CheckIfChartRefUserUploadedByAppId(id int) (bool, func (impl *ChartServiceImpl) ConfigureGitOpsRepoUrlForApp(appId int, repoUrl, chartLocation string, isCustomRepo bool, userId int32) (*bean2.DeploymentConfig, error) { - //update in both charts and deployment config - + // update in both charts and deployment config charts, err := 
impl.chartRepository.FindActiveChartsByAppId(appId) if err != nil { return nil, err @@ -998,10 +1031,8 @@ func (impl *ChartServiceImpl) ConfigureGitOpsRepoUrlForApp(appId int, repoUrl, c } defer impl.chartRepository.RollbackTx(tx) var updatedCharts []*chartRepoRepository.Chart - var isCustom bool for _, ch := range charts { if !ch.IsCustomGitRepository { - isCustom = ch.IsCustomGitRepository ch.GitRepoUrl = repoUrl ch.UpdateAuditLog(userId) updatedCharts = append(updatedCharts, ch) @@ -1017,17 +1048,20 @@ func (impl *ChartServiceImpl) ConfigureGitOpsRepoUrlForApp(appId int, repoUrl, c return nil, err } - deploymentConfig := &bean2.DeploymentConfig{ - AppId: appId, - ConfigType: common.GetDeploymentConfigType(isCustom), - RepoURL: repoUrl, - Active: true, + deploymentConfig, err := impl.deploymentConfigService.GetConfigForDevtronApps(appId, 0) + if err != nil { + impl.logger.Errorw("error in getting deployment config", "appId", appId, "error", err) + return nil, err } + deploymentConfig = deploymentConfig.SetRepoURL(repoUrl) + deploymentConfig.SetChartLocation(chartLocation) + deploymentConfig, err = impl.deploymentConfigService.CreateOrUpdateConfig(nil, deploymentConfig, userId) if err != nil { impl.logger.Errorw("error in saving deployment config for app", "appId", appId, "err", err) return nil, err } + return deploymentConfig, nil } @@ -1063,6 +1097,16 @@ func (impl *ChartServiceImpl) ConfigureGitOpsRepoUrlForApp(appId int, repoUrl, c //} func (impl *ChartServiceImpl) IsGitOpsRepoAlreadyRegistered(gitOpsRepoUrl string) (bool, error) { + + isURLPresent, err := impl.deploymentConfigService.CheckIfURLAlreadyPresent(gitOpsRepoUrl) + if err != nil { + impl.logger.Errorw("error in checking if gitOps repo url is already present", "error", err) + return false, err + } + if isURLPresent { + return true, nil + } + chartModel, err := impl.chartRepository.FindChartByGitRepoUrl(gitOpsRepoUrl) if err != nil && !util.IsErrNoRows(err) { impl.logger.Errorw("error in fetching chartModel", "repoUrl", gitOpsRepoUrl, "err", err) diff --git a/pkg/chart/bean.go b/pkg/chart/bean/bean.go similarity index 98% rename from pkg/chart/bean.go rename to pkg/chart/bean/bean.go index cbe6fe3503..abcc28bdfe 100644 --- a/pkg/chart/bean.go +++ b/pkg/chart/bean/bean.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package chart +package bean import ( "encoding/json" @@ -44,6 +44,7 @@ type TemplateRequest struct { IsBasicViewLocked bool `json:"isBasicViewLocked"` CurrentViewEditor models.ChartsViewEditorType `json:"currentViewEditor"` //default "UNDEFINED" in db GitRepoUrl string `json:"-"` + TargetRevision string `json:"-"` IsCustomGitRepository bool `json:"-"` UserId int32 `json:"-"` LatestChartVersion string `json:"-"` diff --git a/pkg/chart/gitOpsConfig/DevtronAppGitOpsConfigService.go b/pkg/chart/gitOpsConfig/DevtronAppGitOpsConfigService.go index d85e99bf69..d6da3a1a17 100644 --- a/pkg/chart/gitOpsConfig/DevtronAppGitOpsConfigService.go +++ b/pkg/chart/gitOpsConfig/DevtronAppGitOpsConfigService.go @@ -22,11 +22,13 @@ import ( "github.com/devtron-labs/devtron/client/argocdServer" chartService "github.com/devtron-labs/devtron/pkg/chart" "github.com/devtron-labs/devtron/pkg/chart/gitOpsConfig/bean" + "github.com/devtron-labs/devtron/pkg/chart/read" "github.com/devtron-labs/devtron/pkg/deployment/common" commonBean "github.com/devtron-labs/devtron/pkg/deployment/gitOps/common/bean" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/validation" bean3 "github.com/devtron-labs/devtron/pkg/deployment/gitOps/validation/bean" + globalUtil "github.com/devtron-labs/devtron/util" "net/http" "path/filepath" @@ -50,6 +52,7 @@ type DevtronAppGitOpConfigServiceImpl struct { gitOpsValidationService validation.GitOpsValidationService argoClientWrapperService argocdServer.ArgoClientWrapperService deploymentConfigService common.DeploymentConfigService + chartReadService read.ChartReadService } func NewDevtronAppGitOpConfigServiceImpl(logger *zap.SugaredLogger, @@ -58,7 +61,8 @@ func NewDevtronAppGitOpConfigServiceImpl(logger *zap.SugaredLogger, gitOpsConfigReadService config.GitOpsConfigReadService, gitOpsValidationService validation.GitOpsValidationService, argoClientWrapperService argocdServer.ArgoClientWrapperService, - deploymentConfigService common.DeploymentConfigService) *DevtronAppGitOpConfigServiceImpl { + deploymentConfigService common.DeploymentConfigService, + chartReadService read.ChartReadService) *DevtronAppGitOpConfigServiceImpl { return &DevtronAppGitOpConfigServiceImpl{ logger: logger, chartRepository: chartRepository, @@ -67,6 +71,7 @@ func NewDevtronAppGitOpConfigServiceImpl(logger *zap.SugaredLogger, gitOpsValidationService: gitOpsValidationService, argoClientWrapperService: argoClientWrapperService, deploymentConfigService: deploymentConfigService, + chartReadService: chartReadService, } } @@ -76,7 +81,7 @@ func (impl *DevtronAppGitOpConfigServiceImpl) SaveAppLevelGitOpsConfiguration(ap impl.logger.Errorw("error in fetching active gitOps config", "err", err) return err } - if !gitOpsConfigurationStatus.IsGitOpsConfigured { + if !gitOpsConfigurationStatus.IsGitOpsConfiguredAndArgoCdInstalled() { apiErr := &util.ApiError{ HttpStatusCode: http.StatusPreconditionFailed, UserMessage: "GitOps integration is not installed/configured. 
Please install/configure GitOps.", @@ -102,7 +107,7 @@ func (impl *DevtronAppGitOpConfigServiceImpl) SaveAppLevelGitOpsConfiguration(ap return apiErr } - appDeploymentTemplate, err := impl.chartService.FindLatestChartForAppByAppId(appGitOpsRequest.AppId) + appDeploymentTemplate, err := impl.chartReadService.FindLatestChartForAppByAppId(appGitOpsRequest.AppId) if util.IsErrNoRows(err) { impl.logger.Errorw("no base charts configured for app", "appId", appGitOpsRequest.AppId, "err", err) apiErr := &util.ApiError{ @@ -115,13 +120,14 @@ func (impl *DevtronAppGitOpConfigServiceImpl) SaveAppLevelGitOpsConfiguration(ap impl.logger.Errorw("error in fetching latest chart for app by appId", "appId", appGitOpsRequest.AppId, "err", err) return err } - validateCustomGitRepoURLRequest := bean3.ValidateCustomGitRepoURLRequest{ + validateCustomGitRepoURLRequest := bean3.ValidateGitOpsRepoRequest{ GitRepoURL: appGitOpsRequest.GitOpsRepoURL, UserId: appGitOpsRequest.UserId, AppName: appName, GitOpsProvider: gitOpsConfigurationStatus.Provider, + TargetRevision: globalUtil.GetDefaultTargetRevision(), } - repoUrl, _, validationErr := impl.gitOpsValidationService.ValidateCustomGitRepoURL(validateCustomGitRepoURLRequest) + repoUrl, _, validationErr := impl.gitOpsValidationService.ValidateCustomGitOpsConfig(validateCustomGitRepoURLRequest) if validationErr != nil { apiErr := &util.ApiError{ HttpStatusCode: http.StatusBadRequest, @@ -130,13 +136,14 @@ func (impl *DevtronAppGitOpConfigServiceImpl) SaveAppLevelGitOpsConfiguration(ap } return apiErr } - // ValidateCustomGitRepoURL returns sanitized repo url after validation + // ValidateCustomGitOpsConfig returns sanitized repo url after validation appGitOpsRequest.GitOpsRepoURL = repoUrl chartGitAttr := &commonBean.ChartGitAttribute{ - RepoUrl: repoUrl, - ChartLocation: filepath.Join(appDeploymentTemplate.RefChartTemplate, appDeploymentTemplate.LatestChartVersion), + RepoUrl: repoUrl, + TargetRevision: globalUtil.GetDefaultTargetRevision(), + ChartLocation: filepath.Join(appDeploymentTemplate.RefChartTemplate, appDeploymentTemplate.LatestChartVersion), } - err = impl.argoClientWrapperService.RegisterGitOpsRepoInArgoWithRetry(ctx, chartGitAttr.RepoUrl, appGitOpsRequest.UserId) + err = impl.argoClientWrapperService.RegisterGitOpsRepoInArgoWithRetry(ctx, chartGitAttr.RepoUrl, chartGitAttr.TargetRevision, appGitOpsRequest.UserId) if err != nil { impl.logger.Errorw("error while register git repo in argo", "err", err) return err @@ -155,7 +162,7 @@ func (impl *DevtronAppGitOpConfigServiceImpl) GetAppLevelGitOpsConfiguration(app if err != nil { impl.logger.Errorw("error in fetching active gitOps config", "err", err) return nil, err - } else if !gitOpsConfigurationStatus.IsGitOpsConfigured { + } else if !gitOpsConfigurationStatus.IsGitOpsConfiguredAndArgoCdInstalled() { apiErr := &util.ApiError{ HttpStatusCode: http.StatusPreconditionFailed, UserMessage: "GitOps integration is not installed/configured. 
Please install/configure GitOps.", @@ -170,7 +177,7 @@ func (impl *DevtronAppGitOpConfigServiceImpl) GetAppLevelGitOpsConfiguration(app } return nil, apiErr } - appDeploymentTemplate, err := impl.chartService.FindLatestChartForAppByAppId(appId) + appDeploymentTemplate, err := impl.chartReadService.FindLatestChartForAppByAppId(appId) if util.IsErrNoRows(err) { impl.logger.Errorw("no base charts configured for app", "appId", appId, "err", err) apiErr := &util.ApiError{ @@ -201,5 +208,5 @@ func (impl *DevtronAppGitOpConfigServiceImpl) isGitRepoUrlPresent(appId int) boo impl.logger.Errorw("error fetching git repo url from deploymentConfig for latest chart") return false } - return !apiGitOpsBean.IsGitOpsRepoNotConfigured(deploymentConfig.RepoURL) + return !apiGitOpsBean.IsGitOpsRepoNotConfigured(deploymentConfig.GetRepoURL()) } diff --git a/pkg/chart/mocks/ChartService.go b/pkg/chart/mocks/ChartService.go index 7c7447ed36..740c6a99be 100644 --- a/pkg/chart/mocks/ChartService.go +++ b/pkg/chart/mocks/ChartService.go @@ -4,6 +4,7 @@ package mocks import ( chart "github.com/devtron-labs/devtron/pkg/chart" + bean2 "github.com/devtron-labs/devtron/pkg/chart/bean" bean "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/bean" context "context" @@ -69,23 +70,23 @@ func (_m *ChartService) CheckIfChartRefUserUploadedByAppId(id int) (bool, error) } // Create provides a mock function with given fields: templateRequest, ctx -func (_m *ChartService) Create(templateRequest chart.TemplateRequest, ctx context.Context) (*chart.TemplateRequest, error) { +func (_m *ChartService) Create(templateRequest bean2.TemplateRequest, ctx context.Context) (*bean2.TemplateRequest, error) { ret := _m.Called(templateRequest, ctx) - var r0 *chart.TemplateRequest + var r0 *bean2.TemplateRequest var r1 error - if rf, ok := ret.Get(0).(func(chart.TemplateRequest, context.Context) (*chart.TemplateRequest, error)); ok { + if rf, ok := ret.Get(0).(func(bean2.TemplateRequest, context.Context) (*bean2.TemplateRequest, error)); ok { return rf(templateRequest, ctx) } - if rf, ok := ret.Get(0).(func(chart.TemplateRequest, context.Context) *chart.TemplateRequest); ok { + if rf, ok := ret.Get(0).(func(bean2.TemplateRequest, context.Context) *bean2.TemplateRequest); ok { r0 = rf(templateRequest, ctx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*chart.TemplateRequest) + r0 = ret.Get(0).(*bean2.TemplateRequest) } } - if rf, ok := ret.Get(1).(func(chart.TemplateRequest, context.Context) error); ok { + if rf, ok := ret.Get(1).(func(bean2.TemplateRequest, context.Context) error); ok { r1 = rf(templateRequest, ctx) } else { r1 = ret.Error(1) @@ -95,23 +96,23 @@ func (_m *ChartService) Create(templateRequest chart.TemplateRequest, ctx contex } // CreateChartFromEnvOverride provides a mock function with given fields: templateRequest, ctx -func (_m *ChartService) CreateChartFromEnvOverride(templateRequest chart.TemplateRequest, ctx context.Context) (*chart.TemplateRequest, error) { +func (_m *ChartService) CreateChartFromEnvOverride(templateRequest bean2.TemplateRequest, ctx context.Context) (*bean2.TemplateRequest, error) { ret := _m.Called(templateRequest, ctx) - var r0 *chart.TemplateRequest + var r0 *bean2.TemplateRequest var r1 error - if rf, ok := ret.Get(0).(func(chart.TemplateRequest, context.Context) (*chart.TemplateRequest, error)); ok { + if rf, ok := ret.Get(0).(func(bean2.TemplateRequest, context.Context) (*bean2.TemplateRequest, error)); ok {
return rf(templateRequest, ctx) } - if rf, ok := ret.Get(0).(func(chart.TemplateRequest, context.Context) *chart.TemplateRequest); ok { + if rf, ok := ret.Get(0).(func(bean2.TemplateRequest, context.Context) *bean2.TemplateRequest); ok { r0 = rf(templateRequest, ctx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*chart.TemplateRequest) + r0 = ret.Get(0).(*bean2.TemplateRequest) } } - if rf, ok := ret.Get(1).(func(chart.TemplateRequest, context.Context) error); ok { + if rf, ok := ret.Get(1).(func(bean2.TemplateRequest, context.Context) error); ok { r1 = rf(templateRequest, ctx) } else { r1 = ret.Error(1) @@ -121,19 +122,19 @@ func (_m *ChartService) CreateChartFromEnvOverride(templateRequest chart.Templat } // FindLatestChartForAppByAppId provides a mock function with given fields: appId -func (_m *ChartService) FindLatestChartForAppByAppId(appId int) (*chart.TemplateRequest, error) { +func (_m *ChartService) FindLatestChartForAppByAppId(appId int) (*bean2.TemplateRequest, error) { ret := _m.Called(appId) - var r0 *chart.TemplateRequest + var r0 *bean2.TemplateRequest var r1 error - if rf, ok := ret.Get(0).(func(int) (*chart.TemplateRequest, error)); ok { + if rf, ok := ret.Get(0).(func(int) (*bean2.TemplateRequest, error)); ok { return rf(appId) } - if rf, ok := ret.Get(0).(func(int) *chart.TemplateRequest); ok { + if rf, ok := ret.Get(0).(func(int) *bean2.TemplateRequest); ok { r0 = rf(appId) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*chart.TemplateRequest) + r0 = ret.Get(0).(*bean2.TemplateRequest) } } @@ -147,19 +148,19 @@ func (_m *ChartService) FindLatestChartForAppByAppId(appId int) (*chart.Template } // FindPreviousChartByAppId provides a mock function with given fields: appId -func (_m *ChartService) FindPreviousChartByAppId(appId int) (*chart.TemplateRequest, error) { +func (_m *ChartService) FindPreviousChartByAppId(appId int) (*bean2.TemplateRequest, error) { ret := _m.Called(appId) - var r0 *chart.TemplateRequest + var r0 *bean2.TemplateRequest var r1 error - if rf, ok := ret.Get(0).(func(int) (*chart.TemplateRequest, error)); ok { + if rf, ok := ret.Get(0).(func(int) (*bean2.TemplateRequest, error)); ok { return rf(appId) } - if rf, ok := ret.Get(0).(func(int) *chart.TemplateRequest); ok { + if rf, ok := ret.Get(0).(func(int) *bean2.TemplateRequest); ok { r0 = rf(appId) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*chart.TemplateRequest) + r0 = ret.Get(0).(*bean2.TemplateRequest) } } @@ -173,19 +174,19 @@ func (_m *ChartService) FindPreviousChartByAppId(appId int) (*chart.TemplateRequ } // GetByAppIdAndChartRefId provides a mock function with given fields: appId, chartRefId -func (_m *ChartService) GetByAppIdAndChartRefId(appId int, chartRefId int) (*chart.TemplateRequest, error) { +func (_m *ChartService) GetByAppIdAndChartRefId(appId int, chartRefId int) (*bean2.TemplateRequest, error) { ret := _m.Called(appId, chartRefId) - var r0 *chart.TemplateRequest + var r0 *bean2.TemplateRequest var r1 error - if rf, ok := ret.Get(0).(func(int, int) (*chart.TemplateRequest, error)); ok { + if rf, ok := ret.Get(0).(func(int, int) (*bean2.TemplateRequest, error)); ok { return rf(appId, chartRefId) } - if rf, ok := ret.Get(0).(func(int, int) *chart.TemplateRequest); ok { + if rf, ok := ret.Get(0).(func(int, int) *bean2.TemplateRequest); ok { r0 = rf(appId, chartRefId) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*chart.TemplateRequest) + r0 = ret.Get(0).(*bean2.TemplateRequest) } } @@ -297,23 +298,23 @@ func (_m *ChartService) PatchEnvOverrides(values 
json.RawMessage, oldChartType s } // UpdateAppOverride provides a mock function with given fields: ctx, templateRequest -func (_m *ChartService) UpdateAppOverride(ctx context.Context, templateRequest *chart.TemplateRequest) (*chart.TemplateRequest, error) { +func (_m *ChartService) UpdateAppOverride(ctx context.Context, templateRequest *bean2.TemplateRequest) (*bean2.TemplateRequest, error) { ret := _m.Called(ctx, templateRequest) - var r0 *chart.TemplateRequest + var r0 *bean2.TemplateRequest var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *chart.TemplateRequest) (*chart.TemplateRequest, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *bean2.TemplateRequest) (*bean2.TemplateRequest, error)); ok { return rf(ctx, templateRequest) } - if rf, ok := ret.Get(0).(func(context.Context, *chart.TemplateRequest) *chart.TemplateRequest); ok { + if rf, ok := ret.Get(0).(func(context.Context, *bean2.TemplateRequest) *bean2.TemplateRequest); ok { r0 = rf(ctx, templateRequest) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*chart.TemplateRequest) + r0 = ret.Get(0).(*bean2.TemplateRequest) } } - if rf, ok := ret.Get(1).(func(context.Context, *chart.TemplateRequest) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *bean2.TemplateRequest) error); ok { r1 = rf(ctx, templateRequest) } else { r1 = ret.Error(1) diff --git a/pkg/chart/read/ChartReadService.go b/pkg/chart/read/ChartReadService.go new file mode 100644 index 0000000000..41e38cba0e --- /dev/null +++ b/pkg/chart/read/ChartReadService.go @@ -0,0 +1,151 @@ +package read + +import ( + "encoding/json" + "fmt" + apiGitOpsBean "github.com/devtron-labs/devtron/api/bean/gitOps" + "github.com/devtron-labs/devtron/internal/util" + "github.com/devtron-labs/devtron/pkg/chart/bean" + chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" + "github.com/devtron-labs/devtron/pkg/deployment/common" + bean2 "github.com/devtron-labs/devtron/pkg/deployment/common/bean" + "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" + "github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics" + util2 "github.com/devtron-labs/devtron/util" + "go.uber.org/zap" + "strings" +) + +type ChartReadService interface { + GetByAppIdAndChartRefId(appId int, chartRefId int) (chartTemplate *bean.TemplateRequest, err error) + IsGitOpsRepoConfiguredForDevtronApps(appIds []int) (map[int]bool, error) + FindLatestChartForAppByAppId(appId int) (chartTemplate *bean.TemplateRequest, err error) +} + +type ChartReadServiceImpl struct { + logger *zap.SugaredLogger + chartRepository chartRepoRepository.ChartRepository + deploymentConfigService common.DeploymentConfigService + deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService + gitOpsConfigReadService config.GitOpsConfigReadService +} + +func NewChartReadServiceImpl(logger *zap.SugaredLogger, + chartRepository chartRepoRepository.ChartRepository, + deploymentConfigService common.DeploymentConfigService, + deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService, + gitOpsConfigReadService config.GitOpsConfigReadService) *ChartReadServiceImpl { + return &ChartReadServiceImpl{ + logger: logger, + chartRepository: chartRepository, + deploymentConfigService: deploymentConfigService, + deployedAppMetricsService: deployedAppMetricsService, + gitOpsConfigReadService: 
gitOpsConfigReadService, + } + +} + +func (impl *ChartReadServiceImpl) GetByAppIdAndChartRefId(appId int, chartRefId int) (chartTemplate *bean.TemplateRequest, err error) { + chart, err := impl.chartRepository.FindChartByAppIdAndRefId(appId, chartRefId) + if err != nil { + impl.logger.Errorw("error in fetching chart ", "appId", appId, "err", err) + return nil, err + } + isAppMetricsEnabled, err := impl.deployedAppMetricsService.GetMetricsFlagByAppId(appId) + if err != nil { + impl.logger.Errorw("error in fetching app-metrics", "appId", appId, "err", err) + return nil, err + } + deploymentConfig, err := impl.deploymentConfigService.GetConfigForDevtronApps(appId, 0) + if err != nil { + impl.logger.Errorw("error in fetching deployment config by appId", "appId", appId, "err", err) + return nil, err + } + chartTemplate, err = impl.chartAdaptor(chart, isAppMetricsEnabled, deploymentConfig) + return chartTemplate, err +} + +func (impl *ChartReadServiceImpl) IsGitOpsRepoConfiguredForDevtronApps(appIds []int) (map[int]bool, error) { + gitOpsConfigStatus, err := impl.gitOpsConfigReadService.IsGitOpsConfigured() + if err != nil { + impl.logger.Errorw("error in fetching gitOps configuration status", "err", err) + return nil, err + } + appIdRepoConfiguredMap := make(map[int]bool, len(appIds)) + for _, appId := range appIds { + if !gitOpsConfigStatus.IsGitOpsConfiguredAndArgoCdInstalled() { + appIdRepoConfiguredMap[appId] = false + } else if !gitOpsConfigStatus.AllowCustomRepository { + appIdRepoConfiguredMap[appId] = true + } else { + latestChartConfiguredInApp, err := impl.FindLatestChartForAppByAppId(appId) + if err != nil { + impl.logger.Errorw("error in fetching latest chart for app by appId", "appId", appId, "err", err) + return nil, err + } + appIdRepoConfiguredMap[appId] = !apiGitOpsBean.IsGitOpsRepoNotConfigured(latestChartConfiguredInApp.GitRepoUrl) + } + } + return appIdRepoConfiguredMap, nil +} + +func (impl *ChartReadServiceImpl) FindLatestChartForAppByAppId(appId int) (chartTemplate *bean.TemplateRequest, err error) { + chart, err := impl.chartRepository.FindLatestChartForAppByAppId(appId) + if err != nil { + impl.logger.Errorw("error in fetching chart ", "appId", appId, "err", err) + return nil, err + } + + deploymentConfig, err := impl.deploymentConfigService.GetConfigForDevtronApps(appId, 0) + if err != nil { + impl.logger.Errorw("error in fetching deployment config by appId", "appId", appId, "err", err) + return nil, err + } + + isAppMetricsEnabled, err := impl.deployedAppMetricsService.GetMetricsFlagByAppId(appId) + if err != nil { + impl.logger.Errorw("error in fetching app-metrics", "appId", appId, "err", err) + return nil, err + } + chartTemplate, err = impl.chartAdaptor(chart, isAppMetricsEnabled, deploymentConfig) + return chartTemplate, err +} + +// converts db object to bean +func (impl *ChartReadServiceImpl) chartAdaptor(chartInput *chartRepoRepository.Chart, isAppMetricsEnabled bool, deploymentConfig *bean2.DeploymentConfig) (*bean.TemplateRequest, error) { + if chartInput == nil || chartInput.Id == 0 { + return &bean.TemplateRequest{}, &util.ApiError{UserMessage: "no chartInput found"} + } + gitRepoUrl := "" + targetRevision := util2.GetDefaultTargetRevision() + if !apiGitOpsBean.IsGitOpsRepoNotConfigured(deploymentConfig.GetRepoURL()) { + gitRepoUrl = deploymentConfig.GetRepoURL() + targetRevision = deploymentConfig.GetTargetRevision() + } + templateRequest := &bean.TemplateRequest{ + RefChartTemplate: chartInput.ReferenceTemplate, + Id: chartInput.Id, + AppId: chartInput.AppId, + ChartRepositoryId:
chartInput.ChartRepoId, + DefaultAppOverride: json.RawMessage(chartInput.GlobalOverride), + RefChartTemplateVersion: impl.getParentChartVersion(chartInput.ChartVersion), + Latest: chartInput.Latest, + ChartRefId: chartInput.ChartRefId, + IsAppMetricsEnabled: isAppMetricsEnabled, + IsBasicViewLocked: chartInput.IsBasicViewLocked, + CurrentViewEditor: chartInput.CurrentViewEditor, + GitRepoUrl: gitRepoUrl, + IsCustomGitRepository: deploymentConfig.ConfigType == bean2.CUSTOM.String(), + ImageDescriptorTemplate: chartInput.ImageDescriptorTemplate, + TargetRevision: targetRevision, + } + if chartInput.Latest { + templateRequest.LatestChartVersion = chartInput.ChartVersion + } + return templateRequest, nil +} + +func (impl *ChartReadServiceImpl) getParentChartVersion(childVersion string) string { + placeholders := strings.Split(childVersion, ".") + return fmt.Sprintf("%s.%s.0", placeholders[0], placeholders[1]) +} diff --git a/pkg/chartRepo/repository/ChartsRepository.go b/pkg/chartRepo/repository/ChartsRepository.go index 32229c17c9..b73b06e69e 100644 --- a/pkg/chartRepo/repository/ChartsRepository.go +++ b/pkg/chartRepo/repository/ChartsRepository.go @@ -38,7 +38,7 @@ type Chart struct { Status models.ChartStatus `sql:"status"` //(new , deployment-in-progress, deployed-To-production, error ) Active bool `sql:"active"` GitRepoUrl string `sql:"git_repo_url"` // Deprecated; use deployment_config table instead //git repository where chart is stored - ChartLocation string `sql:"chart_location"` //location within git repo where current chart is pointing + ChartLocation string `sql:"chart_location"` // Deprecated; location within git repo where current chart is pointing ReferenceTemplate string `sql:"reference_template"` ImageDescriptorTemplate string `sql:"image_descriptor_template"` ChartRefId int `sql:"chart_ref_id"` diff --git a/pkg/cluster/ClusterServiceExtended.go b/pkg/cluster/ClusterServiceExtended.go index f8876cb66f..6ae1fe00bd 100644 --- a/pkg/cluster/ClusterServiceExtended.go +++ b/pkg/cluster/ClusterServiceExtended.go @@ -237,7 +237,7 @@ func (impl *ClusterServiceImplExtended) Update(ctx context.Context, bean *bean.C } // if git-ops configured, then only update cluster in ACD, otherwise ignore - if gitOpsConfigurationStatus.IsGitOpsConfigured { + if gitOpsConfigurationStatus.IsGitOpsConfiguredAndArgoCdInstalled() { configMap := bean.Config serverUrl := bean.ServerUrl bearerToken := "" @@ -350,7 +350,7 @@ func (impl *ClusterServiceImplExtended) Save(ctx context.Context, bean *bean.Clu } // if git-ops configured, then only add cluster in ACD, otherwise ignore - if gitOpsConfigurationStatus.IsGitOpsConfigured { + if gitOpsConfigurationStatus.IsGitOpsConfiguredAndArgoCdInstalled() { //create it into argo cd as well cl := impl.ConvertClusterBeanObjectToCluster(bean) diff --git a/pkg/cluster/environment/read/EnvironmentReadService.go b/pkg/cluster/environment/read/EnvironmentReadService.go index 23d15e3b08..f61e5c4272 100644 --- a/pkg/cluster/environment/read/EnvironmentReadService.go +++ b/pkg/cluster/environment/read/EnvironmentReadService.go @@ -7,6 +7,7 @@ import ( ) type EnvironmentReadService interface { + GetClusterIdByEnvId(envId int) (int, error) GetAll() ([]bean2.EnvironmentBean, error) } @@ -22,6 +23,16 @@ func NewEnvironmentReadServiceImpl(logger *zap.SugaredLogger, environmentRepository: environmentRepository, } } + +func (impl *EnvironmentReadServiceImpl) GetClusterIdByEnvId(envId int) (int, error) { + model, err := impl.environmentRepository.FindById(envId) + if err != nil { + 
impl.logger.Errorw("error in fetching environment", "err", err, "envId", envId) + return 0, err + } + return model.ClusterId, nil +} + func (impl *EnvironmentReadServiceImpl) GetAll() ([]bean2.EnvironmentBean, error) { models, err := impl.environmentRepository.FindAll() if err != nil { diff --git a/pkg/cluster/read/ClusterReadService.go b/pkg/cluster/read/ClusterReadService.go index e2619151a3..e8ab539baa 100644 --- a/pkg/cluster/read/ClusterReadService.go +++ b/pkg/cluster/read/ClusterReadService.go @@ -11,6 +11,7 @@ type ClusterReadService interface { IsClusterReachable(clusterId int) (bool, error) FindById(id int) (*bean.ClusterBean, error) FindOne(clusterName string) (*bean.ClusterBean, error) + FindByClusterURL(clusterURL string) (*bean.ClusterBean, error) } type ClusterReadServiceImpl struct { @@ -56,3 +57,12 @@ func (impl *ClusterReadServiceImpl) FindOne(clusterName string) (*bean.ClusterBe bean := adapter.GetClusterBean(*model) return &bean, nil } + +func (impl *ClusterReadServiceImpl) FindByClusterURL(clusterURL string) (*bean.ClusterBean, error) { + model, err := impl.clusterRepository.FindByClusterURL(clusterURL) + if err != nil { + return nil, err + } + bean := adapter.GetClusterBean(*model) + return &bean, nil +} diff --git a/pkg/cluster/repository/ClusterRepository.go b/pkg/cluster/repository/ClusterRepository.go index b037727770..27a78a040d 100644 --- a/pkg/cluster/repository/ClusterRepository.go +++ b/pkg/cluster/repository/ClusterRepository.go @@ -70,6 +70,7 @@ type ClusterRepository interface { FindActiveClusters() ([]Cluster, error) SaveAll(models []*Cluster) error FindByNames(clusterNames []string) ([]*Cluster, error) + FindByClusterURL(clusterURL string) (*Cluster, error) } func NewClusterRepositoryImpl(dbConnection *pg.DB, logger *zap.SugaredLogger) *ClusterRepositoryImpl { @@ -205,3 +206,13 @@ func (impl ClusterRepositoryImpl) UpdateClusterConnectionStatus(clusterId int, e Update() return err } + +func (impl ClusterRepositoryImpl) FindByClusterURL(clusterURL string) (*Cluster, error) { + cluster := &Cluster{} + err := impl.dbConnection. + Model(cluster). + Where("server_url =?", clusterURL). + Where("active =?", true). + Select() + return cluster, err +} diff --git a/pkg/commonService/CommonBaseService.go b/pkg/commonService/CommonBaseService.go new file mode 100644 index 0000000000..e398266d89 --- /dev/null +++ b/pkg/commonService/CommonBaseService.go @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2020-2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package commonService + +import ( + "errors" + util2 "github.com/devtron-labs/devtron/internal/util" + "github.com/devtron-labs/devtron/pkg/module/bean" + moduleRead "github.com/devtron-labs/devtron/pkg/module/read" + moduleErr "github.com/devtron-labs/devtron/pkg/module/read/error" + "github.com/devtron-labs/devtron/util" + "go.uber.org/zap" + "net/http" +) + +type CommonBaseServiceImpl struct { + logger *zap.SugaredLogger + globalEnvVariables *util.EnvironmentVariables + moduleReadService moduleRead.ModuleReadService +} + +func NewCommonBaseServiceImpl(logger *zap.SugaredLogger, envVariables *util.EnvironmentVariables, + moduleReadService moduleRead.ModuleReadService) *CommonBaseServiceImpl { + return &CommonBaseServiceImpl{ + logger: logger, + globalEnvVariables: envVariables, + moduleReadService: moduleReadService, + } +} + +func (impl *CommonBaseServiceImpl) isGitOpsEnable() (*FeatureGitOpsVariables, error) { + featureGitOpsFlags := &FeatureGitOpsVariables{ + IsFeatureArgoCdMigrationEnabled: impl.globalEnvVariables.DeploymentServiceTypeConfig.IsFeatureMigrateArgoCdApplicationEnable(), + } + argoModule, err := impl.moduleReadService.GetModuleInfoByName(bean.ModuleNameArgoCd) + if err != nil && !errors.Is(err, moduleErr.ModuleNotFoundError) { + impl.logger.Errorw("error in getting argo module", "error", err) + return featureGitOpsFlags, err + } + if !impl.globalEnvVariables.DeploymentServiceTypeConfig.IsFeatureMigrateArgoCdApplicationEnable() { + featureGitOpsFlags.IsFeatureGitOpsEnabled = argoModule.IsInstalled() + featureGitOpsFlags.IsFeatureUserDefinedGitOpsEnabled = argoModule.IsInstalled() + return featureGitOpsFlags, nil + } else { + ciCdModule, err := impl.moduleReadService.GetModuleInfoByName(bean.ModuleNameCiCd) + if err != nil && !errors.Is(err, moduleErr.ModuleNotFoundError) { + impl.logger.Errorw("error in getting ci cd module", "error", err) + return featureGitOpsFlags, err + } + featureGitOpsFlags.IsFeatureGitOpsEnabled = ciCdModule.IsInstalled() + featureGitOpsFlags.IsFeatureUserDefinedGitOpsEnabled = argoModule.IsInstalled() + } + return featureGitOpsFlags, nil +} + +func (impl *CommonBaseServiceImpl) EnvironmentVariableList() (*EnvironmentVariableList, error) { + environmentVariableList := &EnvironmentVariableList{} + featureGitOpsFlags, err := impl.isGitOpsEnable() + if err != nil { + impl.logger.Errorw("error in getting gitops enabled", "error", err) + return environmentVariableList, err + } + environmentVariableList.FeatureGitOpsFlags = featureGitOpsFlags + return environmentVariableList, nil +} + +func (impl *CommonBaseServiceImpl) GlobalChecklist() (*GlobalChecklist, error) { + return nil, util2.DefaultApiError().WithHttpStatusCode(http.StatusNotFound).WithInternalMessage(util.NotSupportedErr).WithUserMessage(util.NotSupportedErr) +} + +func (impl *CommonBaseServiceImpl) FetchLatestChartVersion(appId int, envId int) (string, error) { + return "", util2.DefaultApiError().WithHttpStatusCode(http.StatusNotFound).WithInternalMessage(util.NotSupportedErr).WithUserMessage(util.NotSupportedErr) +} diff --git a/pkg/commonService/CommonService.go b/pkg/commonService/CommonService.go index 267b4c445d..b06ad9573d 100644 --- a/pkg/commonService/CommonService.go +++ b/pkg/commonService/CommonService.go @@ -36,6 +36,7 @@ import ( type CommonService interface { FetchLatestChartVersion(appId int, envId int) (string, error) GlobalChecklist() (*GlobalChecklist, error) + 
EnvironmentVariableList() (*EnvironmentVariableList, error) } type CommonServiceImpl struct { @@ -48,6 +49,7 @@ type CommonServiceImpl struct { environmentRepository repository3.EnvironmentRepository appRepository app.AppRepository gitOpsConfigReadService config.GitOpsConfigReadService + commonBaseServiceImpl *CommonBaseServiceImpl envConfigOverrideReadService read3.EnvConfigOverrideService teamReadService read2.TeamReadService } @@ -62,6 +64,7 @@ func NewCommonServiceImpl(logger *zap.SugaredLogger, gitOpsConfigReadService config.GitOpsConfigReadService, gitProviderReadService read.GitProviderReadService, envConfigOverrideReadService read3.EnvConfigOverrideService, + commonBaseServiceImpl *CommonBaseServiceImpl, teamReadService read2.TeamReadService) *CommonServiceImpl { serviceImpl := &CommonServiceImpl{ logger: logger, @@ -73,35 +76,13 @@ func NewCommonServiceImpl(logger *zap.SugaredLogger, appRepository: appRepository, gitOpsConfigReadService: gitOpsConfigReadService, gitProviderReadService: gitProviderReadService, + commonBaseServiceImpl: commonBaseServiceImpl, envConfigOverrideReadService: envConfigOverrideReadService, teamReadService: teamReadService, } return serviceImpl } -type GlobalChecklist struct { - AppChecklist *AppChecklist `json:"appChecklist"` - ChartChecklist *ChartChecklist `json:"chartChecklist"` - IsAppCreated bool `json:"isAppCreated"` - UserId int32 `json:"-"` -} - -type ChartChecklist struct { - GitOps int `json:"gitOps,omitempty"` - Project int `json:"project"` - Environment int `json:"environment"` -} - -type AppChecklist struct { - GitOps int `json:"gitOps,omitempty"` - Project int `json:"project"` - Git int `json:"git"` - Environment int `json:"environment"` - Docker int `json:"docker"` - HostUrl int `json:"hostUrl"` - //ChartChecklist *ChartChecklist `json:",inline"` -} - func (impl *CommonServiceImpl) FetchLatestChartVersion(appId int, envId int) (string, error) { var chart *chartRepoRepository.Chart if appId > 0 && envId > 0 { @@ -211,3 +192,7 @@ func (impl *CommonServiceImpl) GlobalChecklist() (*GlobalChecklist, error) { } return config, err } + +func (impl *CommonServiceImpl) EnvironmentVariableList() (*EnvironmentVariableList, error) { + return impl.commonBaseServiceImpl.EnvironmentVariableList() +} diff --git a/pkg/commonService/bean.go b/pkg/commonService/bean.go new file mode 100644 index 0000000000..f1eeb9a86c --- /dev/null +++ b/pkg/commonService/bean.go @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2020-2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package commonService + +type GlobalChecklist struct { + AppChecklist *AppChecklist `json:"appChecklist"` + ChartChecklist *ChartChecklist `json:"chartChecklist"` + IsAppCreated bool `json:"isAppCreated"` + UserId int32 `json:"-"` +} + +type ChartChecklist struct { + GitOps int `json:"gitOps,omitempty"` + Project int `json:"project"` + Environment int `json:"environment"` +} + +type FeatureGitOpsVariables struct { + IsFeatureGitOpsEnabled bool `json:"isFeatureGitOpsEnabled"` + IsFeatureUserDefinedGitOpsEnabled bool `json:"isFeatureUserDefinedGitOpsEnabled"` + IsFeatureArgoCdMigrationEnabled bool `json:"isFeatureArgoCdMigrationEnabled"` +} + +type EnvironmentVariableList struct { + FeatureGitOpsFlags *FeatureGitOpsVariables `json:"featureGitOpsFlags"` + EnvironmentVariableListEnt +} + +type AppChecklist struct { + GitOps int `json:"gitOps,omitempty"` + Project int `json:"project"` + Git int `json:"git"` + Environment int `json:"environment"` + Docker int `json:"docker"` + HostUrl int `json:"hostUrl"` + //ChartChecklist *ChartChecklist `json:",inline"` +} diff --git a/pkg/commonService/bean_ent.go b/pkg/commonService/bean_ent.go new file mode 100644 index 0000000000..c04eaf9c68 --- /dev/null +++ b/pkg/commonService/bean_ent.go @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2020-2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package commonService + +type EnvironmentVariableListEnt struct { +} diff --git a/pkg/config/configDiff/DeploymentConfigurationService.go b/pkg/config/configDiff/DeploymentConfigurationService.go index e820fd2827..b8168bfc60 100644 --- a/pkg/config/configDiff/DeploymentConfigurationService.go +++ b/pkg/config/configDiff/DeploymentConfigurationService.go @@ -3,11 +3,11 @@ package configDiff import ( "context" "encoding/json" + errors2 "errors" "fmt" "github.com/argoproj/gitops-engine/pkg/utils/kube" k8sUtil "github.com/devtron-labs/common-lib/utils/k8s" bean4 "github.com/devtron-labs/devtron/api/bean" - bean5 "github.com/devtron-labs/devtron/api/helm-app/bean" "github.com/devtron-labs/devtron/api/helm-app/gRPC" "github.com/devtron-labs/devtron/api/helm-app/service" read3 "github.com/devtron-labs/devtron/api/helm-app/service/read" @@ -18,6 +18,8 @@ import ( "github.com/devtron-labs/devtron/internal/util" bean3 "github.com/devtron-labs/devtron/pkg/bean" chartService "github.com/devtron-labs/devtron/pkg/chart" + read4 "github.com/devtron-labs/devtron/pkg/chart/read" + clusterBean "github.com/devtron-labs/devtron/pkg/cluster/bean" repository4 "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" "github.com/devtron-labs/devtron/pkg/config/configDiff/adaptor" bean2 "github.com/devtron-labs/devtron/pkg/config/configDiff/bean" @@ -80,6 +82,7 @@ type DeploymentConfigurationServiceImpl struct { k8sUtil k8sUtil.K8sService mergeUtil util.MergeUtil HelmAppReadService read3.HelmAppReadService + chartReadService read4.ChartReadService } func NewDeploymentConfigurationServiceImpl(logger *zap.SugaredLogger, @@ -107,6 +110,7 @@ func NewDeploymentConfigurationServiceImpl(logger *zap.SugaredLogger, k8sUtil k8sUtil.K8sService, mergeUtil util.MergeUtil, HelmAppReadService read3.HelmAppReadService, + chartReadService read4.ChartReadService, ) (*DeploymentConfigurationServiceImpl, error) { deploymentConfigurationService := &DeploymentConfigurationServiceImpl{ logger: logger, @@ -134,6 +138,7 @@ func NewDeploymentConfigurationServiceImpl(logger *zap.SugaredLogger, k8sUtil: k8sUtil, mergeUtil: mergeUtil, HelmAppReadService: HelmAppReadService, + chartReadService: chartReadService, } return deploymentConfigurationService, nil @@ -212,6 +217,7 @@ func (impl *DeploymentConfigurationServiceImpl) GetManifest(ctx context.Context, impl.logger.Errorw("error in finding app by id", "appId", appId, "err", err) return nil, err } + releaseName := app.AppName refChart, chartInBytes, err := impl.getRefChartBytes(ctx, envId, appId, app) if err != nil { @@ -235,12 +241,25 @@ func (impl *DeploymentConfigurationServiceImpl) GetManifest(ctx context.Context, impl.logger.Errorw("error in getting environment", "envId", envId, "err", err) return nil, err } - envName = environment.Name - scope.ClusterId = environment.ClusterId - scope.SystemMetadata.EnvironmentName = envName - scope.SystemMetadata.ClusterName = environment.Cluster.ClusterName - namespace = environment.Namespace - scope.SystemMetadata.Namespace = namespace + if environment != nil { + envName = environment.Name + scope.ClusterId = environment.ClusterId + scope.SystemMetadata.EnvironmentName = envName + scope.SystemMetadata.ClusterName = environment.Cluster.ClusterName + namespace = 
environment.Namespace + scope.SystemMetadata.Namespace = namespace + if len(envName) != 0 { + releaseName = util2.BuildDeployedAppName(app.AppName, envName) + } + } + pipelineModel, err := impl.pipelineRepository.FindOneByAppIdAndEnvId(appId, envId) + if err != nil && !errors2.Is(err, pg.ErrNoRows) { + impl.logger.Errorw("error in getting pipeline model", "appId", appId, "envId", envId, "err", err) + return nil, err + } + if pipelineModel != nil && len(pipelineModel.DeploymentAppName) != 0 { + releaseName = pipelineModel.DeploymentAppName + } } isSuperAdmin, err := util2.GetIsSuperAdminFromContext(ctx) @@ -272,10 +291,6 @@ func (impl *DeploymentConfigurationServiceImpl) GetManifest(ctx context.Context, sanitizedK8sVersion := k8sServerVersion.String() - releaseName := app.AppName - if len(envName) > 0 { - releaseName = fmt.Sprintf("%s-%s", app.AppName, envName) - } installReleaseRequest := &gRPC.InstallReleaseRequest{ AppName: app.AppName, ChartName: refChart.Name, @@ -289,7 +304,7 @@ func (impl *DeploymentConfigurationServiceImpl) GetManifest(ctx context.Context, ReleaseName: releaseName, }, } - config, err := impl.HelmAppReadService.GetClusterConf(bean5.DEFAULT_CLUSTER_ID) + config, err := impl.HelmAppReadService.GetClusterConf(clusterBean.DefaultClusterId) if err != nil { impl.logger.Errorw("error in fetching cluster detail", "clusterId", 1, "err", err) return nil, err @@ -469,7 +484,7 @@ func (impl *DeploymentConfigurationServiceImpl) getConfiguredChartRef(envId int, if envOverride != nil && envOverride.Chart != nil { chartRefId = envOverride.Chart.ChartRefId } else { - chart, err := impl.chartService.FindLatestChartForAppByAppId(appId) + chart, err := impl.chartReadService.FindLatestChartForAppByAppId(appId) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching latest chart", "err", err) return 0, nil @@ -478,7 +493,7 @@ func (impl *DeploymentConfigurationServiceImpl) getConfiguredChartRef(envId int, chartRefId = chart.ChartRefId } } else { - chart, err := impl.chartService.FindLatestChartForAppByAppId(appId) + chart, err := impl.chartReadService.FindLatestChartForAppByAppId(appId) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching latest chart", "err", err) return 0, nil @@ -1207,7 +1222,7 @@ func (impl *DeploymentConfigurationServiceImpl) getPublishedPipelineStrategyConf } func (impl *DeploymentConfigurationServiceImpl) getBaseDeploymentTemplate(appId int) (*bean2.DeploymentTemplateMetadata, error) { - deploymentTemplateData, err := impl.chartService.FindLatestChartForAppByAppId(appId) + deploymentTemplateData, err := impl.chartReadService.FindLatestChartForAppByAppId(appId) if err != nil { impl.logger.Errorw("error in getting base deployment template for appId", "appId", appId, "err", err) return nil, err diff --git a/pkg/deployment/common/adapter.go b/pkg/deployment/common/adapter.go deleted file mode 100644 index 9d58afaa20..0000000000 --- a/pkg/deployment/common/adapter.go +++ /dev/null @@ -1,32 +0,0 @@ -package common - -import ( - "github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig" - "github.com/devtron-labs/devtron/pkg/deployment/common/bean" -) - -func ConvertDeploymentConfigDTOToDbObj(config *bean.DeploymentConfig) *deploymentConfig.DeploymentConfig { - return &deploymentConfig.DeploymentConfig{ - Id: config.Id, - AppId: config.AppId, - EnvironmentId: config.EnvironmentId, - DeploymentAppType: config.DeploymentAppType, - ConfigType: config.ConfigType, - RepoUrl: 
config.RepoURL, - Active: config.Active, - ReleaseMode: config.ReleaseMode, - } -} - -func ConvertDeploymentConfigDbObjToDTO(dbObj *deploymentConfig.DeploymentConfig) *bean.DeploymentConfig { - return &bean.DeploymentConfig{ - Id: dbObj.Id, - AppId: dbObj.AppId, - EnvironmentId: dbObj.EnvironmentId, - DeploymentAppType: dbObj.DeploymentAppType, - ConfigType: dbObj.ConfigType, - RepoURL: dbObj.RepoUrl, - Active: dbObj.Active, - ReleaseMode: dbObj.ReleaseMode, - } -} diff --git a/pkg/deployment/common/adapter/adapter.go b/pkg/deployment/common/adapter/adapter.go new file mode 100644 index 0000000000..eab0d11bf0 --- /dev/null +++ b/pkg/deployment/common/adapter/adapter.go @@ -0,0 +1,89 @@ +package adapter + +import ( + "encoding/json" + "github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig" + "github.com/devtron-labs/devtron/pkg/deployment/common/bean" +) + +func NewDeploymentConfigMin(deploymentAppType, releaseMode string, isGitOpsRepoConfigured bool) *bean.DeploymentConfigMin { + return &bean.DeploymentConfigMin{ + DeploymentAppType: deploymentAppType, + ReleaseMode: releaseMode, + IsGitOpsRepoConfigured: isGitOpsRepoConfigured, + } +} + +func ConvertDeploymentConfigDTOToDbObj(config *bean.DeploymentConfig) (*deploymentConfig.DeploymentConfig, error) { + releaseConfigJson, err := json.Marshal(config.ReleaseConfiguration) + if err != nil { + return nil, err + } + return &deploymentConfig.DeploymentConfig{ + Id: config.Id, + AppId: config.AppId, + EnvironmentId: config.EnvironmentId, + DeploymentAppType: config.DeploymentAppType, + RepoUrl: config.RepoURL, + ConfigType: config.ConfigType, + Active: config.Active, + ReleaseMode: config.ReleaseMode, + ReleaseConfig: string(releaseConfigJson), + }, nil +} + +func ConvertDeploymentConfigDbObjToDTO(dbObj *deploymentConfig.DeploymentConfig) (*bean.DeploymentConfig, error) { + + if dbObj == nil { + return nil, nil + } + + var releaseConfig bean.ReleaseConfiguration + + if len(dbObj.ReleaseConfig) != 0 { + err := json.Unmarshal([]byte(dbObj.ReleaseConfig), &releaseConfig) + if err != nil { + return nil, err + } + } + + return &bean.DeploymentConfig{ + Id: dbObj.Id, + AppId: dbObj.AppId, + EnvironmentId: dbObj.EnvironmentId, + DeploymentAppType: dbObj.DeploymentAppType, + ConfigType: dbObj.ConfigType, + Active: dbObj.Active, + ReleaseMode: dbObj.ReleaseMode, + RepoURL: dbObj.RepoUrl, + ReleaseConfiguration: &releaseConfig, + }, nil +} + +func NewAppLevelReleaseConfigFromChart(gitRepoURL, chartLocation string) *bean.ReleaseConfiguration { + return &bean.ReleaseConfiguration{ + Version: bean.Version, + ArgoCDSpec: bean.ArgoCDSpec{ + Spec: bean.ApplicationSpec{ + Source: &bean.ApplicationSource{ + RepoURL: gitRepoURL, + Path: chartLocation, + }, + }, + }} +} + +func GetDeploymentConfigType(isCustomGitOpsRepo bool) string { + if isCustomGitOpsRepo { + return string(bean.CUSTOM) + } + return string(bean.SYSTEM_GENERATED) +} + +func GetDevtronArgoCdAppInfo(acdAppName string, acdAppClusterId int, acdDefaultNamespace string) *bean.DevtronArgoCdAppInfo { + return &bean.DevtronArgoCdAppInfo{ + ArgoCdAppName: acdAppName, + ArgoAppClusterId: acdAppClusterId, + ArgoAppNamespace: acdDefaultNamespace, + } +} diff --git a/pkg/deployment/common/bean/bean.go b/pkg/deployment/common/bean/bean.go index 60fe73a6a1..e2dfdbebc7 100644 --- a/pkg/deployment/common/bean/bean.go +++ b/pkg/deployment/common/bean/bean.go @@ -2,20 +2,319 @@ package bean import ( "fmt" + apiGitOpsBean 
"github.com/devtron-labs/devtron/api/bean/gitOps" + "github.com/devtron-labs/devtron/internal/util" + globalUtil "github.com/devtron-labs/devtron/util" "strconv" "strings" ) +type ReleaseConfigVersion string + +const ( + Version ReleaseConfigVersion = "v1.0.0" +) + +type ReleaseConfiguration struct { + Version ReleaseConfigVersion `json:"version"` + ArgoCDSpec ArgoCDSpec `json:"argoCDSpec"` +} + +type ArgoCDSpec struct { + Metadata ApplicationMetadata `json:"metadata"` + Spec ApplicationSpec `json:"spec"` +} + +func (a *ArgoCDSpec) SetApplicationObjectClusterId(clusterId int) { + a.Metadata.ClusterId = clusterId +} + +type ApplicationMetadata struct { + ClusterId int `json:"clusterId"` + Namespace string `json:"namespace"` + Name string `json:"name"` +} + +type ApplicationSpec struct { + Destination *Destination `json:"destination,omitempty"` + Source *ApplicationSource `json:"source,omitempty"` + SyncPolicy *SyncPolicyAutomated `json:"syncPolicy,omitempty"` +} + +type ApplicationSource struct { + // RepoURL is the URL to the repository (Git or Helm) that contains the application manifests + RepoURL string `json:"repoURL"` + // Path is a directory path within the Git repository, and is only valid for applications sourced from Git. + Path string `json:"path,omitempty"` + // TargetRevision defines the revision of the source to sync the application to. + // In case of Git, this can be commit, tag, or branch. If omitted, will equal to HEAD. + // In case of Helm, this is a semver tag for the Chart's version. + TargetRevision string `json:"targetRevision,omitempty"` + // Helm holds helm specific options + Helm *ApplicationSourceHelm `json:"helm,omitempty"` + // Chart is a Helm chart name, and must be specified for applications sourced from a Helm repo. + Chart string `json:"chart,omitempty"` + // Ref is reference to another source within sources field. This field will not be used if used with a `source` tag. + Ref string `json:"ref,omitempty"` +} + +// ApplicationSourceHelm holds helm specific options +type ApplicationSourceHelm struct { + // ValuesFiles is a list of Helm value files to use when generating a template + ValueFiles []string `json:"valueFiles,omitempty"` + // Parameters is a list of Helm parameters which are passed to the helm template command upon manifest generation + Parameters []HelmParameter `json:"parameters,omitempty"` + // ReleaseName is the Helm release name to use. 
If omitted it will use the application name + ReleaseName string `json:"releaseName,omitempty"` + // Values specifies Helm values to be passed to helm template, typically defined as a block + Values string `json:"values,omitempty"` + // FileParameters are file parameters to the helm template + FileParameters []HelmFileParameter `json:"fileParameters,omitempty"` + // Version is the Helm version to use for templating ("3") + Version string `json:"version,omitempty"` + // PassCredentials pass credentials to all domains (Helm's --pass-credentials) + PassCredentials bool `json:"passCredentials,omitempty"` + // IgnoreMissingValueFiles prevents helm template from failing when valueFiles do not exist locally by not appending them to helm template --values + IgnoreMissingValueFiles bool `json:"ignoreMissingValueFiles,omitempty"` + // SkipCrds skips custom resource definition installation step (Helm's --skip-crds) + SkipCrds bool `json:"skipCrds,omitempty"` +} + +type HelmParameter struct { + // Name is the name of the Helm parameter + Name string `json:"name,omitempty"` + // Value is the value for the Helm parameter + Value string `json:"value,omitempty"` + // ForceString determines whether to tell Helm to interpret booleans and numbers as strings + ForceString bool `json:"forceString,omitempty"` +} + +// HelmFileParameter is a file parameter that's passed to helm template during manifest generation +type HelmFileParameter struct { + // Name is the name of the Helm parameter + Name string `json:"name,omitempty"` + // Path is the path to the file containing the values for the Helm parameter + Path string `json:"path,omitempty"` +} + +type Destination struct { + Namespace string `json:"namespace,omitempty"` // deployed application namespace + Server string `json:"server,omitempty"` // deployed application cluster url +} + +type Automated struct { + Prune bool `json:"prune"` +} + +type SyncPolicy struct { + Automated *SyncPolicyAutomated `json:"automated,omitempty"` + SyncOptions SyncOptions `json:"syncOptions,omitempty"` + Retry *RetryStrategy `json:"retry,omitempty"` + ManagedNamespaceMetadata *ManagedNamespaceMetadata `json:"managedNamespaceMetadata,omitempty"` +} + +type SyncPolicyAutomated struct { + Prune bool `json:"prune,omitempty"` + SelfHeal bool `json:"selfHeal,omitempty"` + AllowEmpty bool `json:"allowEmpty,omitempty"` +} + +type SyncOptions []string + +// RetryStrategy contains information about the strategy to apply when a sync failed +type RetryStrategy struct { + // Limit is the maximum number of attempts for retrying a failed sync. If set to 0, no retries will be performed. + Limit int64 `json:"limit,omitempty"` + // Backoff controls how to backoff on subsequent retries of failed syncs + Backoff *Backoff `json:"backoff,omitempty"` +} + +// Backoff is the backoff strategy to use on subsequent retries for failing syncs +type Backoff struct { + // Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. 
"2m", "1h") + Duration string `json:"duration,omitempty"` + // Factor is a factor to multiply the base duration after each failed retry + Factor *int64 `json:"factor,omitempty"` + // MaxDuration is the maximum amount of time allowed for the backoff strategy + MaxDuration string `json:"maxDuration,omitempty"` +} + +type ManagedNamespaceMetadata struct { + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +type DeploymentConfigMin struct { + DeploymentAppType string + ReleaseMode string + GitRepoUrl string + IsGitOpsRepoConfigured bool +} + +func (d *DeploymentConfigMin) IsLinkedRelease() bool { + return d.ReleaseMode == util.PIPELINE_RELEASE_MODE_LINK +} + type DeploymentConfig struct { - Id int - AppId int - EnvironmentId int - ConfigType string - DeploymentAppType string - RepoURL string - RepoName string - ReleaseMode string - Active bool + Id int + AppId int + EnvironmentId int + ConfigType string + DeploymentAppType string + ReleaseMode string + RepoURL string // DEPRECATED; + RepoName string + Active bool + ReleaseConfiguration *ReleaseConfiguration +} + +func (d *DeploymentConfig) IsAcdRelease() bool { + return d.DeploymentAppType == util.PIPELINE_DEPLOYMENT_TYPE_ACD +} + +func (d *DeploymentConfig) IsLinkedRelease() bool { + return d.ReleaseMode == util.PIPELINE_RELEASE_MODE_LINK +} + +func (d *DeploymentConfig) IsArgoCdClientSupported() bool { + return d.IsAcdRelease() && !d.IsLinkedRelease() +} + +func (d *DeploymentConfig) IsArgoAppSyncAndRefreshSupported() bool { + return d.IsAcdRelease() && !d.IsLinkedRelease() +} + +func (d *DeploymentConfig) IsArgoAppPatchSupported() bool { + return d.IsAcdRelease() && !d.IsLinkedRelease() +} + +func (d *DeploymentConfig) IsArgoAppCreationRequired(deploymentAppCreated bool) bool { + if !d.IsAcdRelease() { + return false + } + if deploymentAppCreated { + return false + } + if d.IsLinkedRelease() { + return false + } + return true +} + +func (d *DeploymentConfig) IsEmpty() bool { + return d == nil || d.Id == 0 +} + +func (d *DeploymentConfig) IsPipelineGitOpsRepoConfigured(isAppLevelGitOpsConfigured bool) bool { + return isAppLevelGitOpsConfigured || !apiGitOpsBean.IsGitOpsRepoNotConfigured(d.GetRepoURL()) +} + +func (d *DeploymentConfig) GetRepoURL() string { + if d.ReleaseConfiguration == nil || d.ReleaseConfiguration.ArgoCDSpec.Spec.Source == nil { + return d.RepoURL + } + return d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.RepoURL +} + +func (d *DeploymentConfig) GetTargetRevision() string { + if d.ReleaseConfiguration == nil || + d.ReleaseConfiguration.ArgoCDSpec.Spec.Source == nil || + len(d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.TargetRevision) == 0 { + return globalUtil.GetDefaultTargetRevision() + } + return d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.TargetRevision +} + +func (d *DeploymentConfig) GetValuesFilePath() string { + if d.ReleaseConfiguration == nil || d.ReleaseConfiguration.ArgoCDSpec.Spec.Source == nil { + return "" + } + // currently we only support a single value file + if len(d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.Helm.ValueFiles) != 0 { + return d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.Helm.ValueFiles[0] + } else { + return "" + } +} + +func (d *DeploymentConfig) GetChartLocation() string { + if d.ReleaseConfiguration == nil || d.ReleaseConfiguration.ArgoCDSpec.Spec.Source == nil { + return "" + } + return d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.Path +} + +func (d *DeploymentConfig) SetRepoURL(repoURL string) 
*DeploymentConfig { + d.RepoURL = repoURL // maintain for backward compatibility + if d.ReleaseConfiguration == nil || d.ReleaseConfiguration.ArgoCDSpec.Spec.Source == nil { + return d + } + d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.RepoURL = repoURL + return d +} + +func (d *DeploymentConfig) SetChartLocation(chartLocation string) { + if d.ReleaseConfiguration == nil || d.ReleaseConfiguration.ArgoCDSpec.Spec.Source == nil { + return + } + d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.Path = chartLocation +} + +func (d *DeploymentConfig) GetRevision() string { + if d.ReleaseConfiguration == nil || d.ReleaseConfiguration.ArgoCDSpec.Spec.Source == nil { + return "" + } + return d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.TargetRevision +} + +func (d *DeploymentConfig) GetAcdAppName() string { + if d.ReleaseConfiguration == nil { + return "" + } + return d.ReleaseConfiguration.ArgoCDSpec.Metadata.Name +} + +func (d *DeploymentConfig) GetValuesFileName() string { + if d.ReleaseConfiguration == nil || d.ReleaseConfiguration.ArgoCDSpec.Spec.Source == nil || + d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.Helm == nil || len(d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.Helm.ValueFiles) == 0 { + return "" + } + return d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.Helm.ValueFiles[0] +} + +func (d *DeploymentConfig) GetDestinationClusterURL() string { + if d.ReleaseConfiguration == nil || d.ReleaseConfiguration.ArgoCDSpec.Spec.Destination == nil { + return "" + } + return d.ReleaseConfiguration.ArgoCDSpec.Spec.Destination.Server +} + +func (d *DeploymentConfig) GetDestinationNamespace() string { + if d.ReleaseConfiguration == nil || d.ReleaseConfiguration.ArgoCDSpec.Spec.Destination == nil { + return "" + } + return d.ReleaseConfiguration.ArgoCDSpec.Spec.Destination.Namespace +} + +func (d *DeploymentConfig) SetApplicationObjectClusterId(id int) { + if d.ReleaseConfiguration == nil { + return + } + d.ReleaseConfiguration.ArgoCDSpec.SetApplicationObjectClusterId(id) +} + +func (d *DeploymentConfig) GetApplicationObjectClusterId() int { + if d.ReleaseConfiguration == nil { + return 0 + } + return d.ReleaseConfiguration.ArgoCDSpec.Metadata.ClusterId +} + +func (d *DeploymentConfig) GetApplicationObjectNamespace() string { + if d.ReleaseConfiguration == nil { + return "" + } + return d.ReleaseConfiguration.ArgoCDSpec.Metadata.Namespace } type UniqueDeploymentConfigIdentifier string @@ -64,5 +363,34 @@ func (d DeploymentConfigCredentialType) String() string { return string(d) } +type ExternalReleaseType string + +func (e ExternalReleaseType) IsArgoApplication() bool { + return e == ArgoApplication +} + +const ( + ArgoApplication ExternalReleaseType = "argoApplication" + HelmRelease ExternalReleaseType = "helmRelease" + Undefined ExternalReleaseType = "" +) + +func (d *DeploymentConfig) GetMigratedFrom() (migratedFrom ExternalReleaseType, isLinked bool) { + if d.IsLinkedRelease() { + if d.DeploymentAppType == util.PIPELINE_DEPLOYMENT_TYPE_ACD { + return ArgoApplication, true + } else if d.DeploymentAppType == util.PIPELINE_DEPLOYMENT_TYPE_HELM { + return HelmRelease, true + } + } + return Undefined, false +} + +type DevtronArgoCdAppInfo struct { + ArgoCdAppName string + ArgoAppClusterId int + ArgoAppNamespace string +} + // DefaultStopTemplate default Stop template for system charts const DefaultStopTemplate = `{"replicaCount":0,"autoscaling":{"MinReplicas":0,"MaxReplicas":0,"enabled":false},"kedaAutoscaling":{"minReplicaCount":0,"maxReplicaCount":0,"enabled":false},"secondaryWorkload":{"replicaCount":0,"autoscaling":{"enabled":false,"MinReplicas":0,"MaxReplicas":0}}}`
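The getters above all follow one defensive pattern: prefer the nested ReleaseConfiguration, and fall back to the deprecated flat columns (or a default) when the nested pointers are absent, so deployment_config rows written before the release_config column existed still resolve. A minimal standalone sketch of that fallback behaviour, using trimmed stand-in types rather than the real bean package:

package main

import "fmt"

// Trimmed stand-ins for the bean types above; illustration only.
type ApplicationSource struct {
	RepoURL string
}

type ArgoCDSpec struct {
	Spec struct{ Source *ApplicationSource }
}

type ReleaseConfiguration struct {
	ArgoCDSpec ArgoCDSpec
}

type DeploymentConfig struct {
	RepoURL              string // deprecated flat column, kept for old rows
	ReleaseConfiguration *ReleaseConfiguration
}

// GetRepoURL mirrors the accessor in pkg/deployment/common/bean: nested
// release configuration first, deprecated column as the fallback.
func (d *DeploymentConfig) GetRepoURL() string {
	if d.ReleaseConfiguration == nil || d.ReleaseConfiguration.ArgoCDSpec.Spec.Source == nil {
		return d.RepoURL
	}
	return d.ReleaseConfiguration.ArgoCDSpec.Spec.Source.RepoURL
}

func main() {
	legacy := &DeploymentConfig{RepoURL: "https://git.example.com/legacy.git"}
	fmt.Println(legacy.GetRepoURL()) // falls back to the deprecated column

	migrated := &DeploymentConfig{RepoURL: "https://git.example.com/legacy.git"}
	migrated.ReleaseConfiguration = &ReleaseConfiguration{}
	migrated.ReleaseConfiguration.ArgoCDSpec.Spec.Source = &ApplicationSource{RepoURL: "https://git.example.com/app.git"}
	fmt.Println(migrated.GetRepoURL()) // reads the nested release configuration
}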
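The new adapter in pkg/deployment/common/adapter persists ReleaseConfiguration by marshalling it to JSON into the release_config column (ConvertDeploymentConfigDTOToDbObj) and unmarshalling it on read (ConvertDeploymentConfigDbObjToDTO); an empty column yields a zero-value configuration, which is exactly what triggers the fallbacks above. A self-contained sketch of that round-trip, again with simplified stand-in types; the repo URL and chart path values are illustrative only:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Simplified stand-ins for the release configuration beans.
type ApplicationSource struct {
	RepoURL string `json:"repoURL"`
	Path    string `json:"path,omitempty"`
}

type ApplicationSpec struct {
	Source *ApplicationSource `json:"source,omitempty"`
}

type ArgoCDSpec struct {
	Spec ApplicationSpec `json:"spec"`
}

type ReleaseConfiguration struct {
	Version    string     `json:"version"`
	ArgoCDSpec ArgoCDSpec `json:"argoCDSpec"`
}

func main() {
	// DTO -> db object: marshal the nested configuration into a text column.
	rc := &ReleaseConfiguration{
		Version: "v1.0.0",
		ArgoCDSpec: ArgoCDSpec{Spec: ApplicationSpec{Source: &ApplicationSource{
			RepoURL: "https://git.example.com/app.git",
			Path:    "reference-chart_4-18-0/4.18.0",
		}}},
	}
	column, err := json.Marshal(rc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(column))

	// db object -> DTO: unmarshal the column back into the bean.
	var decoded ReleaseConfiguration
	if err := json.Unmarshal(column, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.ArgoCDSpec.Spec.Source.RepoURL)
}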
diff --git a/pkg/deployment/common/deploymentConfigService.go b/pkg/deployment/common/deploymentConfigService.go
index 8ac8eaffaf..1e648c4b5c 100644
--- a/pkg/deployment/common/deploymentConfigService.go
+++ b/pkg/deployment/common/deploymentConfigService.go
@@ -17,8 +17,10 @@ package common
 
 import (
+	"errors"
 	"fmt"
-	"github.com/devtron-labs/devtron/api/bean/gitOps"
+	"github.com/devtron-labs/common-lib/utils/k8s/commonBean"
+	"github.com/devtron-labs/devtron/client/argocdServer"
 	appRepository "github.com/devtron-labs/devtron/internal/sql/repository/app"
 	"github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig"
 	"github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig"
@@ -26,26 +28,37 @@ import (
 	installedAppReader "github.com/devtron-labs/devtron/pkg/appStore/installedApp/read"
 	bean3 "github.com/devtron-labs/devtron/pkg/auth/user/bean"
 	chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository"
+	clusterBean "github.com/devtron-labs/devtron/pkg/cluster/bean"
+	bean4 "github.com/devtron-labs/devtron/pkg/cluster/environment/bean"
+	"github.com/devtron-labs/devtron/pkg/cluster/environment/repository"
+	"github.com/devtron-labs/devtron/pkg/deployment/common/adapter"
 	"github.com/devtron-labs/devtron/pkg/deployment/common/bean"
+	commonErr "github.com/devtron-labs/devtron/pkg/deployment/common/errors"
+	read2 "github.com/devtron-labs/devtron/pkg/deployment/common/read"
+	"github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/read"
+	util3 "github.com/devtron-labs/devtron/pkg/util"
 	"github.com/devtron-labs/devtron/util"
 	"github.com/go-pg/pg"
 	"go.uber.org/zap"
+	"path/filepath"
 )
 
 type DeploymentConfigService interface {
 	CreateOrUpdateConfig(tx *pg.Tx, config *bean.DeploymentConfig, userId int32) (*bean.DeploymentConfig, error)
 	CreateOrUpdateConfigInBulk(tx *pg.Tx, configToBeCreated, configToBeUpdated []*bean.DeploymentConfig, userId int32) error
-	IsDeploymentConfigUsed() bool
 	GetConfigForDevtronApps(appId, envId int) (*bean.DeploymentConfig, error)
 	GetAndMigrateConfigIfAbsentForDevtronApps(appId, envId int) (*bean.DeploymentConfig, error)
 	GetConfigForHelmApps(appId, envId int) (*bean.DeploymentConfig, error)
 	IsChartStoreAppManagedByArgoCd(appId int) (bool, error)
 	GetConfigEvenIfInactive(appId, envId int) (*bean.DeploymentConfig, error)
 	GetAndMigrateConfigIfAbsentForHelmApp(appId, envId int) (*bean.DeploymentConfig, error)
-	GetAppLevelConfigForDevtronApp(appId int) (*bean.DeploymentConfig, error)
 	UpdateRepoUrlForAppAndEnvId(repoURL string, appId, envId int) error
-	GetDeploymentAppTypeForCDInBulk(pipelines []*pipelineConfig.Pipeline) (map[int]string, error)
 	GetConfigsByAppIds(appIds []int) ([]*bean.DeploymentConfig, error)
+	UpdateChartLocationInDeploymentConfig(appId, envId, chartRefId int, userId int32, chartVersion string) error
+	GetAllArgoAppInfosByDeploymentAppNames(deploymentAppNames []string) ([]*bean.DevtronArgoCdAppInfo, error)
+	GetExternalReleaseType(appId, environmentId int) (bean.ExternalReleaseType, error)
+	CheckIfURLAlreadyPresent(repoURL string) (bool, error)
+	FilterPipelinesByApplicationClusterIdAndNamespace(pipelines []pipelineConfig.Pipeline,
+		applicationObjectClusterId int, applicationObjectNamespace string) (pipelineConfig.Pipeline, error)
 }
 
 type DeploymentConfigServiceImpl struct {
@@ -56,6 +69,11 @@ type DeploymentConfigServiceImpl struct {
 	appRepository               appRepository.AppRepository
 	installedAppReadService     installedAppReader.InstalledAppReadServiceEA
 	deploymentServiceTypeConfig *util.DeploymentServiceTypeConfig
+	envConfigOverrideService    read.EnvConfigOverrideService
+	environmentRepository       repository.EnvironmentRepository
+	chartRefRepository          chartRepoRepository.ChartRefRepository
+	deploymentConfigReadService read2.DeploymentConfigReadService
+	acdAuthConfig               *util3.ACDAuthConfig
 }
 
 func NewDeploymentConfigServiceImpl(
@@ -66,7 +84,13 @@ func NewDeploymentConfigServiceImpl(
 	appRepository appRepository.AppRepository,
 	installedAppReadService installedAppReader.InstalledAppReadServiceEA,
 	envVariables *util.EnvironmentVariables,
+	envConfigOverrideService read.EnvConfigOverrideService,
+	environmentRepository repository.EnvironmentRepository,
+	chartRefRepository chartRepoRepository.ChartRefRepository,
+	deploymentConfigReadService read2.DeploymentConfigReadService,
+	acdAuthConfig *util3.ACDAuthConfig,
 ) *DeploymentConfigServiceImpl {
 	return &DeploymentConfigServiceImpl{
 		deploymentConfigRepository: deploymentConfigRepository,
 		logger:                     logger,
@@ -75,20 +99,27 @@ func NewDeploymentConfigServiceImpl(
 		appRepository:               appRepository,
 		installedAppReadService:     installedAppReadService,
 		deploymentServiceTypeConfig: envVariables.DeploymentServiceTypeConfig,
+		envConfigOverrideService:    envConfigOverrideService,
+		environmentRepository:       environmentRepository,
+		chartRefRepository:          chartRefRepository,
+		deploymentConfigReadService: deploymentConfigReadService,
+		acdAuthConfig:               acdAuthConfig,
 	}
 }
 
 func (impl *DeploymentConfigServiceImpl) CreateOrUpdateConfig(tx *pg.Tx, config *bean.DeploymentConfig, userId int32) (*bean.DeploymentConfig, error) {
+	newDBObj, err := adapter.ConvertDeploymentConfigDTOToDbObj(config)
+	if err != nil {
+		impl.logger.Errorw("error in converting deployment config DTO to db object", "appId", config.AppId, "envId", config.EnvironmentId, "err", err)
+		return nil, err
+	}
 	configDbObj, err := impl.GetConfigDBObj(config.AppId, config.EnvironmentId)
-	if err != nil && err != pg.ErrNoRows {
+	if err != nil && !errors.Is(err, pg.ErrNoRows) {
 		impl.logger.Errorw("error in fetching deployment config from DB by appId and envId", "appId", config.AppId, "envId", config.EnvironmentId, "err", err)
 	}
-
-	newDBObj := ConvertDeploymentConfigDTOToDbObj(config)
-
-	if configDbObj == nil || (configDbObj != nil && configDbObj.Id == 0) {
+	if configDbObj == nil || configDbObj.Id == 0 {
 		newDBObj.AuditLog.CreateAuditLog(userId)
 		newDBObj, err = impl.deploymentConfigRepository.Save(tx, newDBObj)
 		if err != nil {
@@ -105,22 +136,34 @@ func (impl *DeploymentConfigServiceImpl) CreateOrUpdateConfig(tx *pg.Tx, config
 			return nil, err
 		}
 	}
-
-	return ConvertDeploymentConfigDbObjToDTO(newDBObj), nil
+	newObj, err := adapter.ConvertDeploymentConfigDbObjToDTO(newDBObj)
+	if err != nil {
+		impl.logger.Errorw("error in converting deployment config db object to DTO", "appId", config.AppId, "envId", config.EnvironmentId, "err", err)
+		return nil, err
+	}
+	return newObj, nil
 }
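This file systematically swaps `err != pg.ErrNoRows` for `errors.Is(err, pg.ErrNoRows)`. The two are not equivalent once an error is wrapped anywhere in the call chain; a standalone illustration (toy wrapping, not Devtron code):

	package main

	import (
		"errors"
		"fmt"

		"github.com/go-pg/pg"
	)

	func main() {
		wrapped := fmt.Errorf("fetching deployment config: %w", pg.ErrNoRows)
		fmt.Println(wrapped == pg.ErrNoRows)          // false: direct comparison misses wrapped errors
		fmt.Println(errors.Is(wrapped, pg.ErrNoRows)) // true: errors.Is walks the unwrap chain
	}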
 
 func (impl *DeploymentConfigServiceImpl) CreateOrUpdateConfigInBulk(tx *pg.Tx, configToBeCreated, configToBeUpdated []*bean.DeploymentConfig, userId int32) error {
 	dbObjCreate := make([]*deploymentConfig.DeploymentConfig, 0, len(configToBeCreated))
 	for i := range configToBeCreated {
-		dbObj := ConvertDeploymentConfigDTOToDbObj(configToBeCreated[i])
+		dbObj, err := adapter.ConvertDeploymentConfigDTOToDbObj(configToBeCreated[i])
+		if err != nil {
+			impl.logger.Errorw("error in converting deployment config DTO to db object", "appId", configToBeCreated[i].AppId, "envId", configToBeCreated[i].EnvironmentId, "err", err)
+			return err
+		}
 		dbObj.AuditLog.CreateAuditLog(userId)
 		dbObjCreate = append(dbObjCreate, dbObj)
 	}
 	dbObjUpdate := make([]*deploymentConfig.DeploymentConfig, 0, len(configToBeUpdated))
 	for i := range configToBeUpdated {
-		dbObj := ConvertDeploymentConfigDTOToDbObj(configToBeUpdated[i])
+		dbObj, err := adapter.ConvertDeploymentConfigDTOToDbObj(configToBeUpdated[i])
+		if err != nil {
+			impl.logger.Errorw("error in converting deployment config DTO to db object", "appId", configToBeUpdated[i].AppId, "envId", configToBeUpdated[i].EnvironmentId, "err", err)
+			return err
+		}
 		dbObj.AuditLog.UpdateAuditLog(userId)
 		dbObjUpdate = append(dbObjUpdate, dbObj)
 	}
@@ -144,316 +187,302 @@ func (impl *DeploymentConfigServiceImpl) CreateOrUpdateConfigInBulk(tx *pg.Tx, c
 	return nil
 }
 
-func (impl *DeploymentConfigServiceImpl) IsDeploymentConfigUsed() bool {
-	return impl.deploymentServiceTypeConfig.UseDeploymentConfigData
-}
-
 func (impl *DeploymentConfigServiceImpl) GetConfigForDevtronApps(appId, envId int) (*bean.DeploymentConfig, error) {
-	if !impl.deploymentServiceTypeConfig.UseDeploymentConfigData {
-		configFromOldData, err := impl.parseFromOldTablesForDevtronApps(appId, envId)
-		if err != nil {
-			impl.logger.Errorw("error in parsing config from charts and pipeline repository", "appId", appId, "envId", envId, "err", err)
-			return nil, err
-		}
-		if envId > 0 {
-			// add columns added after migration (of deployment app type and repo url) here
-			appAndEnvLevelConfig, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId)
-			if err != nil && err != pg.ErrNoRows {
-				impl.logger.Errorw("error in getting deployment config db object by appId and envId", "appId", appId, "envId", envId, "err", err)
-				return nil, err
-			}
-			if err == pg.ErrNoRows {
-				// deployment config is not done
-				configFromOldData.ReleaseMode = util2.PIPELINE_RELEASE_MODE_CREATE
-			} else {
-				configFromOldData.ReleaseMode = appAndEnvLevelConfig.ReleaseMode
-			}
-		}
-		return configFromOldData, nil
-	}
-
-	// if USE_DEPLOYMENT_CONFIG_DATA is true, first try to fetch data from deployment_config table and if not found use charts and pipeline respectively
-
-	appLevelConfigDbObj, err := impl.deploymentConfigRepository.GetAppLevelConfigForDevtronApps(appId)
-	if err != nil && err != pg.ErrNoRows {
-		impl.logger.Errorw("error in getting deployment config db object by appId", "appId", appId, "err", err)
+	appLevelConfig, err := impl.getAppLevelConfigForDevtronApps(appId, false)
+	if err != nil {
+		impl.logger.Errorw("error in getting app level config for devtron apps", "appId", appId, "envId", envId, "err", err)
 		return nil, err
 	}
-	if err == pg.ErrNoRows {
-		appLevelConfigDbObj, err = impl.parseAppLevelConfigForDevtronApps(appId)
-		if err != nil {
-			impl.logger.Errorw("error in migrating app level config to deployment config", "appId", appId, "err", err)
-			return nil, err
-		}
-	}
+
 	if envId > 0 {
-		// if envId>0 then only env level config will be returned, for getting app level config envId should be zero
-		appAndEnvLevelConfig, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId)
-		if err != nil && err != pg.ErrNoRows {
-			impl.logger.Errorw("error in getting deployment config db object by appId and envId", "appId", appId, "envId", envId, "err", err)
+		// if envId > 0 then only env level config will be returned,
+		// for getting app level config envId should be zero
+		envLevelConfig, err := impl.getEnvLevelDataForDevtronApps(appId, envId, appLevelConfig, false)
+		if err != nil {
+			impl.logger.Errorw("error in getting env level data for devtron apps", "appId", appId, "envId", envId, "err", err)
 			return nil, err
 		}
-		if err == pg.ErrNoRows {
-			appAndEnvLevelConfig, err = impl.parseEnvLevelConfigForDevtronApps(appLevelConfigDbObj, appId, envId)
-			if err != nil {
-				impl.logger.Errorw("error in migrating app level config to deployment config", "appId", appId, "err", err)
-				return nil, err
-			}
-		} else if gitOps.IsGitOpsRepoNotConfigured(appAndEnvLevelConfig.RepoUrl) && gitOps.IsGitOpsRepoConfigured(appLevelConfigDbObj.RepoUrl) {
-			// if url is present at app level and not at env level then copy app level url to env level config
-			appAndEnvLevelConfig.RepoUrl = appLevelConfigDbObj.RepoUrl
-		}
-
-		return ConvertDeploymentConfigDbObjToDTO(appAndEnvLevelConfig), nil
+		return envLevelConfig, nil
 	}
-	return ConvertDeploymentConfigDbObjToDTO(appLevelConfigDbObj), nil
+	return appLevelConfig, nil
 }
 
 func (impl *DeploymentConfigServiceImpl) GetAndMigrateConfigIfAbsentForDevtronApps(appId, envId int) (*bean.DeploymentConfig, error) {
-
-	appLevelConfigDbObj, err := impl.deploymentConfigRepository.GetAppLevelConfigForDevtronApps(appId)
-	if err != nil && err != pg.ErrNoRows {
-		impl.logger.Errorw("error in getting deployment config db object by appId", "appId", appId, "err", err)
+	migrateDeploymentConfigData := impl.deploymentServiceTypeConfig.MigrateDeploymentConfigData
+	appLevelConfig, err := impl.getAppLevelConfigForDevtronApps(appId, migrateDeploymentConfigData)
+	if err != nil {
+		impl.logger.Errorw("error in getting app level config for devtron apps", "appId", appId, "envId", envId, "err", err)
 		return nil, err
 	}
-	if err == pg.ErrNoRows {
-		impl.logger.Infow("app level deployment config not found, migrating data from charts to deployment_config", "appId", appId, "err", err)
-		appLevelConfigDbObj, err = impl.migrateChartsDataToDeploymentConfig(appId)
-		if err != nil {
-			impl.logger.Errorw("error in migrating app level config to deployment config", "appId", appId, "err", err)
-			return nil, err
-		}
-	}
 	var envLevelConfig *bean.DeploymentConfig
 	if envId > 0 {
-		appAndEnvLevelConfig, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId)
-		if err != nil && err != pg.ErrNoRows {
-			impl.logger.Errorw("error in getting deployment config db object by appId and envId", "appId", appId, "envId", envId, "err", err)
-			return nil, err
-		}
-		if err == pg.ErrNoRows {
-			impl.logger.Infow("env level deployment config not found, migrating data from pipeline to deployment_config", "appId", appId, "envId", envId, "err", err)
-			appAndEnvLevelConfig, err = impl.migrateDevtronAppsPipelineDataToDeploymentConfig(appLevelConfigDbObj, appId, envId)
-			if err != nil {
-				impl.logger.Errorw("error in migrating app level config to deployment config", "appId", appId, "err", err)
-				return nil, err
-			}
-		} else if gitOps.IsGitOpsRepoNotConfigured(appAndEnvLevelConfig.RepoUrl) && gitOps.IsGitOpsRepoConfigured(appLevelConfigDbObj.RepoUrl) {
-			// if url is present at app level and not at env level then copy app level url to env level config.
-			// this happens when custom gitOps is enabled and the app is cloned: when the user configures app level
-			// gitOps, the env level gitOps is not updated
-			appAndEnvLevelConfig.RepoUrl = appLevelConfigDbObj.RepoUrl
-			appAndEnvLevelConfig.AuditLog.UpdateAuditLog(1)
-			appAndEnvLevelConfig, err = impl.deploymentConfigRepository.Update(nil, appAndEnvLevelConfig)
-			if err != nil {
-				impl.logger.Errorw("error in updating deploymentConfig", "appId", appAndEnvLevelConfig.AppId, "envId", appAndEnvLevelConfig.EnvironmentId, "err", err)
-				return nil, err
-			}
-		}
-		envLevelConfig = ConvertDeploymentConfigDbObjToDTO(appAndEnvLevelConfig)
-	}
-
-	if !impl.deploymentServiceTypeConfig.UseDeploymentConfigData {
-		configFromOldData, err := impl.parseFromOldTablesForDevtronApps(appId, envId)
+		// if envId > 0 then only env level config will be returned,
+		// for getting app level config envId should be zero
+		envLevelConfig, err = impl.getEnvLevelDataForDevtronApps(appId, envId, appLevelConfig, migrateDeploymentConfigData)
 		if err != nil {
-			impl.logger.Errorw("error in parsing config from charts and pipeline repository", "appId", appId, "envId", envId, "err", err)
+			impl.logger.Errorw("error in getting env level data for devtron apps", "appId", appId, "envId", envId, "err", err)
 			return nil, err
 		}
-		if envId > 0 {
-			configFromOldData.ReleaseMode = envLevelConfig.ReleaseMode
-		}
-		return configFromOldData, nil
-	}
-
-	if envId > 0 {
 		return envLevelConfig, nil
 	}
+	return appLevelConfig, nil
+}
 
-	return ConvertDeploymentConfigDbObjToDTO(appLevelConfigDbObj), nil
+func (impl *DeploymentConfigServiceImpl) GetConfigForHelmApps(appId, envId int) (*bean.DeploymentConfig, error) {
+	helmDeploymentConfig, err := impl.getConfigForHelmApps(appId, envId, false)
+	if err != nil {
+		impl.logger.Errorw("error in getting deployment config for helm app", "appId", appId, "envId", envId, "err", err)
+		return nil, err
+	}
+	return helmDeploymentConfig, nil
 }
 
-func (impl *DeploymentConfigServiceImpl) migrateChartsDataToDeploymentConfig(appId int) (*deploymentConfig.DeploymentConfig, error) {
-
-	configDbObj, err := impl.parseAppLevelConfigForDevtronApps(appId)
-	if err != nil {
-		impl.logger.Errorw("error in parsing charts data for devtron apps", "appId", appId, "err", err)
-		return nil, err
-	}
-	configDbObj.AuditLog.CreateAuditLog(1)
-	configDbObj, err = impl.deploymentConfigRepository.Save(nil, configDbObj)
-	if err != nil {
-		impl.logger.Errorw("error in saving deployment config in DB", "appId", appId, "err", err)
-		return nil, err
-	}
-	return configDbObj, nil
-}
-
-func (impl *DeploymentConfigServiceImpl) parseAppLevelConfigForDevtronApps(appId int) (*deploymentConfig.DeploymentConfig, error) {
-	chart, err := impl.chartRepository.FindLatestChartForAppByAppId(appId)
-	if err != nil {
-		impl.logger.Errorw("error in fetch chart for git repo migration by appId", "appId", appId, "err", err)
-		return nil, err
-	}
-	ConfigDbObj := &deploymentConfig.DeploymentConfig{
-		ConfigType: GetDeploymentConfigType(chart.IsCustomGitRepository),
-		AppId:      appId,
-		Active:     true,
-		RepoUrl:    chart.GitRepoUrl,
-	}
-	return ConfigDbObj, nil
-}
-
-func (impl *DeploymentConfigServiceImpl) migrateDevtronAppsPipelineDataToDeploymentConfig(appLevelConfig *deploymentConfig.DeploymentConfig, appId int, envId int) (*deploymentConfig.DeploymentConfig, error) {
-
-	configDbObj, err := impl.parseEnvLevelConfigForDevtronApps(appLevelConfig, appId, envId)
-	if err != nil {
-		impl.logger.Errorw("error in parsing config for cd pipeline from appId and envId", "appId", appId, "envId", envId, "err", err)
-		return nil, err
-	}
-	configDbObj.AuditLog.CreateAuditLog(bean3.SYSTEM_USER_ID)
-	configDbObj, err = impl.deploymentConfigRepository.Save(nil, configDbObj)
-	if err != nil {
-		impl.logger.Errorw("error in saving deployment config in DB", "appId", appId, "envId", envId, "err", err)
-		return nil, err
-	}
-	return configDbObj, nil
-}
-
-func (impl *DeploymentConfigServiceImpl) parseEnvLevelConfigForDevtronApps(appLevelConfig *deploymentConfig.DeploymentConfig, appId int, envId int) (*deploymentConfig.DeploymentConfig, error) {
-
-	configDbObj := &deploymentConfig.DeploymentConfig{
-		AppId:         appId,
-		EnvironmentId: envId,
-		ConfigType:    appLevelConfig.ConfigType,
-		RepoUrl:       appLevelConfig.RepoUrl,
-		ReleaseMode:   util2.PIPELINE_RELEASE_MODE_CREATE, //for migration it is always equal to create as migration is happening for old cd pipelines
-		Active:        true,
-	}
-
-	deploymentAppType, err := impl.pipelineRepository.FindDeploymentAppTypeByAppIdAndEnvId(appId, envId)
-	if err != nil {
-		impl.logger.Errorw("error in getting deployment app type by appId and envId", "appId", appId, "envId", envId, "err", err)
-		return nil, err
-	}
-	configDbObj.DeploymentAppType = deploymentAppType
-	return configDbObj, nil
-}
-
+func (impl *DeploymentConfigServiceImpl) IsChartStoreAppManagedByArgoCd(appId int) (bool, error) {
+	deploymentAppType, err := impl.deploymentConfigRepository.GetDeploymentAppTypeForChartStoreAppByAppId(appId)
+	if err != nil && !util2.IsErrNoRows(err) {
+		impl.logger.Errorw("error in GetDeploymentAppTypeForChartStoreAppByAppId", "appId", appId, "err", err)
+		return false, err
+	} else if util2.IsErrNoRows(err) {
+		return impl.installedAppReadService.IsChartStoreAppManagedByArgoCd(appId)
+	}
+	return util2.IsAcdApp(deploymentAppType), nil
+}
+
+func (impl *DeploymentConfigServiceImpl) GetConfigEvenIfInactive(appId, envId int) (*bean.DeploymentConfig, error) {
+	dbConfig, err := impl.deploymentConfigRepository.GetByAppIdAndEnvIdEvenIfInactive(appId, envId)
+	if err != nil {
+		impl.logger.Errorw("error in getting deployment config by appId and envId", "appId", appId, "envId", envId, "err", err)
+		return nil, err
+	}
+	config, err := adapter.ConvertDeploymentConfigDbObjToDTO(dbConfig)
+	if err != nil {
+		impl.logger.Errorw("error in converting deployment config db obj to dto", "appId", appId, "envId", envId, "err", err)
+		return nil, err
+	}
+	return config, nil
+}
+
+func (impl *DeploymentConfigServiceImpl) GetAndMigrateConfigIfAbsentForHelmApp(appId, envId int) (*bean.DeploymentConfig, error) {
+	migrateDataIfAbsent := impl.deploymentServiceTypeConfig.MigrateDeploymentConfigData
+	helmDeploymentConfig, err := impl.getConfigForHelmApps(appId, envId, migrateDataIfAbsent)
+	if err != nil {
+		impl.logger.Errorw("error in getting deployment config for helm app", "appId", appId, "envId", envId, "err", err)
+		return nil, err
+	}
+	return helmDeploymentConfig, nil
+}
+
+func (impl *DeploymentConfigServiceImpl) UpdateRepoUrlForAppAndEnvId(repoURL string, appId, envId int) error {
+
+	dbObj, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId)
+	if err != nil {
+		impl.logger.Errorw("error in getting deployment config by appId", "appId", appId, "envId", envId, "err", err)
+		return err
+	}
+	config, err := adapter.ConvertDeploymentConfigDbObjToDTO(dbObj)
+	if err != nil {
+		impl.logger.Errorw("error in converting deployment config to DTO", "appId", appId, "envId", envId, "err", err)
+		return err
+	}
+	config.SetRepoURL(repoURL)
+
+	// convert the mutated DTO back before persisting, otherwise the updated URL never reaches the row
+	dbObj, err = adapter.ConvertDeploymentConfigDTOToDbObj(config)
+	if err != nil {
+		impl.logger.Errorw("error in converting deployment config DTO to db object", "appId", appId, "envId", envId, "err", err)
+		return err
+	}
+	dbObj, err = impl.deploymentConfigRepository.Update(nil, dbObj)
+	if err != nil {
+		impl.logger.Errorw("error in updating deployment config", "appId", appId, "envId", envId, "err", err)
+		return err
+	}
+	return nil
+}
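UpdateRepoUrlForAppAndEnvId goes through SetRepoURL rather than writing one column, so the deprecated RepoURL field and the ArgoCD source spec stay in sync. The same dual-write applies to any field that now lives in ReleaseConfiguration; a condensed view of what SetRepoURL leaves behind (the URL is illustrative):

	cfg := &bean.DeploymentConfig{ /* loaded from DB */ }
	cfg.SetRepoURL("https://git.example.com/org/app-manifests.git")
	// after the call:
	//   cfg.RepoURL                                          -> new URL (legacy readers keep working)
	//   cfg.ReleaseConfiguration....Spec.Source.RepoURL      -> new URL (ArgoCD spec, when present)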
-func (impl *DeploymentConfigServiceImpl) GetConfigDBObj(appId, envId int) (*deploymentConfig.DeploymentConfig, error) {
-	var configDbObj *deploymentConfig.DeploymentConfig
-	var err error
-	if envId == 0 {
-		configDbObj, err = impl.deploymentConfigRepository.GetAppLevelConfigForDevtronApps(appId)
-		if err != nil {
-			impl.logger.Errorw("error in getting deployment config db object by appId", "appId", configDbObj.AppId, "err", err)
-			return nil, err
-		}
-	} else {
-		configDbObj, err = impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId)
-		if err != nil {
-			impl.logger.Errorw("error in getting deployment config db object by appId and envId", "appId", configDbObj.AppId, "envId", configDbObj.EnvironmentId, "err", err)
-			return nil, err
-		}
-	}
-	return configDbObj, nil
-}
-
+func (impl *DeploymentConfigServiceImpl) GetConfigsByAppIds(appIds []int) ([]*bean.DeploymentConfig, error) {
+	if len(appIds) == 0 {
+		return nil, nil
+	}
+	configs, err := impl.deploymentConfigRepository.GetConfigByAppIds(appIds)
+	if err != nil {
+		impl.logger.Errorw("error in getting deployment config db object by appIds", "appIds", appIds, "err", err)
+		return nil, err
+	}
+	resp := make([]*bean.DeploymentConfig, 0, len(configs))
+	for _, config := range configs {
+		newObj, err := adapter.ConvertDeploymentConfigDbObjToDTO(config)
+		if err != nil {
+			impl.logger.Errorw("error in converting deployment config db object to DTO", "appId", config.AppId, "envId", config.EnvironmentId, "err", err)
+			return nil, err
+		}
+		resp = append(resp, newObj)
+	}
+	return resp, nil
+}
+
+func (impl *DeploymentConfigServiceImpl) UpdateChartLocationInDeploymentConfig(appId, envId, chartRefId int, userId int32, chartVersion string) error {
+
+	pipeline, err := impl.pipelineRepository.FindOneByAppIdAndEnvId(appId, envId)
+	if err != nil && !errors.Is(err, pg.ErrNoRows) {
+		impl.logger.Errorw("error in finding pipeline by app id and env id", "appId", appId, "envId", envId, "err", err)
+		return err
+	}
+	// no need to update deployment config if pipeline is not present
+	if errors.Is(err, pg.ErrNoRows) || (pipeline != nil && pipeline.Id == 0) {
+		return nil
+	}
 
-func (impl *DeploymentConfigServiceImpl) GetConfigForHelmApps(appId, envId int) (*bean.DeploymentConfig, error) {
-
-	if !impl.deploymentServiceTypeConfig.UseDeploymentConfigData {
-		configFromOldData, err := impl.parseConfigForHelmApps(appId, envId)
-		if err != nil {
-			impl.logger.Errorw("error in parsing config from charts and pipeline repository", "appId", appId, "envId", envId, "err", err)
-			return nil, err
-		}
-		return ConvertDeploymentConfigDbObjToDTO(configFromOldData), nil
-	}
-
-	helmDeploymentConfig, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId)
-	if err != nil && err != pg.ErrNoRows {
-		impl.logger.Errorw("error in fetching deployment config by appId and envId", "appId", appId, "envId", envId, "err", err)
-		return nil, err
-	}
-
-	if err == pg.ErrNoRows {
-		helmDeploymentConfig, err = impl.parseConfigForHelmApps(appId, envId)
-		if err != nil {
-			impl.logger.Errorw("error in migrating helm deployment config", "appId", appId, "envId", envId, "err", err)
-			return nil, err
-		}
-	}
-	return ConvertDeploymentConfigDbObjToDTO(helmDeploymentConfig), nil
-}
-
+	config, err := impl.GetConfigForDevtronApps(appId, envId)
+	if err != nil {
+		impl.logger.Errorw("error, GetConfigForDevtronApps", "appId", appId, "envId", envId, "err", err)
+		return err
+	}
+	if config.ReleaseMode == util2.PIPELINE_RELEASE_MODE_CREATE && config.DeploymentAppType == bean4.PIPELINE_DEPLOYMENT_TYPE_ACD {
+		chartRef, err := impl.chartRefRepository.FindById(chartRefId)
+		if err != nil {
+			impl.logger.Errorw("error in chartRefRepository.FindById", "chartRefId", chartRefId, "err", err)
+			return err
+		}
+		chartLocation := filepath.Join(chartRef.Location, chartVersion)
+		config.SetChartLocation(chartLocation)
+		config, err = impl.CreateOrUpdateConfig(nil, config, userId)
+		if err != nil {
+			impl.logger.Errorw("error in CreateOrUpdateConfig", "appId", appId, "envId", envId, "err", err)
+			return err
+		}
+	}
+	return nil
+}
+
+func (impl *DeploymentConfigServiceImpl) GetAllArgoAppInfosByDeploymentAppNames(deploymentAppNames []string) ([]*bean.DevtronArgoCdAppInfo, error) {
+	allDevtronManagedArgoAppsInfo := make([]*bean.DevtronArgoCdAppInfo, 0)
+	linkedReleaseConfig, err := impl.getAllEnvLevelConfigsForLinkedReleases()
+	if err != nil {
+		impl.logger.Errorw("error while fetching linked release configs", "deploymentAppNames", deploymentAppNames, "error", err)
+		return allDevtronManagedArgoAppsInfo, err
+	}
+	linkedReleaseConfigMap := make(map[string]*bean.DeploymentConfig)
+	for _, config := range linkedReleaseConfig {
+		uniqueKey := fmt.Sprintf("%d-%d", config.AppId, config.EnvironmentId)
+		linkedReleaseConfigMap[uniqueKey] = config
+	}
+	devtronArgoAppsInfo, err := impl.pipelineRepository.GetAllArgoAppInfoByDeploymentAppNames(deploymentAppNames)
+	if err != nil {
+		impl.logger.Errorw("error while fetching argo app names", "deploymentAppNames", deploymentAppNames, "error", err)
+		return allDevtronManagedArgoAppsInfo, err
+	}
+	for _, acdAppInfo := range devtronArgoAppsInfo {
+		uniqueKey := fmt.Sprintf("%d-%d", acdAppInfo.AppId, acdAppInfo.EnvironmentId)
+		var devtronArgoCdAppInfo *bean.DevtronArgoCdAppInfo
+		if config, ok := linkedReleaseConfigMap[uniqueKey]; ok &&
+			config.IsAcdRelease() && config.IsLinkedRelease() {
+			acdAppClusterId := config.GetApplicationObjectClusterId()
+			acdDefaultNamespace := config.GetApplicationObjectNamespace()
+			devtronArgoCdAppInfo = adapter.GetDevtronArgoCdAppInfo(acdAppInfo.DeploymentAppName, acdAppClusterId, acdDefaultNamespace)
+		} else {
+			devtronArgoCdAppInfo = adapter.GetDevtronArgoCdAppInfo(acdAppInfo.DeploymentAppName, clusterBean.DefaultClusterId, impl.acdAuthConfig.ACDConfigMapNamespace)
+		}
+		allDevtronManagedArgoAppsInfo = append(allDevtronManagedArgoAppsInfo, devtronArgoCdAppInfo)
+	}
+	chartStoreArgoAppNames, err := impl.installedAppReadService.GetAllArgoAppNamesByDeploymentAppNames(deploymentAppNames)
+	if err != nil {
+		impl.logger.Errorw("error while fetching argo app names from chart store", "deploymentAppNames", deploymentAppNames, "error", err)
+		return allDevtronManagedArgoAppsInfo, err
+	}
+	for _, chartStoreArgoAppName := range chartStoreArgoAppNames {
+		// NOTE: Chart Store doesn't support linked releases
+		chartStoreArgoCdAppInfo := adapter.GetDevtronArgoCdAppInfo(chartStoreArgoAppName, clusterBean.DefaultClusterId, impl.acdAuthConfig.ACDConfigMapNamespace)
+		allDevtronManagedArgoAppsInfo = append(allDevtronManagedArgoAppsInfo, chartStoreArgoCdAppInfo)
+	}
+	return allDevtronManagedArgoAppsInfo, nil
+}
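The linked-release lookup keys its map by "appId-envId" because a deployment config row is unique per (app, environment) pair, and the identical fmt.Sprintf key is rebuilt on the probe side. A generic sketch of the pattern with illustrative IDs:

	key := func(appId, envId int) string { return fmt.Sprintf("%d-%d", appId, envId) }

	byAppEnv := make(map[string]*bean.DeploymentConfig, len(configs))
	for _, c := range configs {
		byAppEnv[key(c.AppId, c.EnvironmentId)] = c
	}
	if c, ok := byAppEnv[key(12, 3)]; ok && c.IsAcdRelease() && c.IsLinkedRelease() {
		// route this app to the cluster/namespace stored on its Application object
	}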
 
-func (impl *DeploymentConfigServiceImpl) IsChartStoreAppManagedByArgoCd(appId int) (bool, error) {
-	deploymentAppType, err := impl.deploymentConfigRepository.GetDeploymentAppTypeForChartStoreAppByAppId(appId)
-	if err != nil && !util2.IsErrNoRows(err) {
-		impl.logger.Errorw("error in GetDeploymentAppTypeForChartStoreAppByAppId", "appId", appId, "err", err)
-		return false, err
-	} else if util2.IsErrNoRows(err) {
-		return impl.installedAppReadService.IsChartStoreAppManagedByArgoCd(appId)
-	}
-	return util2.IsAcdApp(deploymentAppType), nil
-}
-
+func (impl *DeploymentConfigServiceImpl) GetExternalReleaseType(appId, environmentId int) (bean.ExternalReleaseType, error) {
+	config, err := impl.GetConfigForDevtronApps(appId, environmentId)
+	if err != nil && !errors.Is(err, pg.ErrNoRows) {
+		impl.logger.Errorw("error in getting deployment config by appId and envId", "appId", appId, "envId", environmentId, "err", err)
+		return bean.Undefined, err
+	} else if errors.Is(err, pg.ErrNoRows) {
+		return bean.Undefined, nil
+	}
+	externalReleaseType, _ := config.GetMigratedFrom()
+	return externalReleaseType, nil
+}
 
-func (impl *DeploymentConfigServiceImpl) GetConfigEvenIfInactive(appId, envId int) (*bean.DeploymentConfig, error) {
-	config, err := impl.deploymentConfigRepository.GetByAppIdAndEnvIdEvenIfInactive(appId, envId)
-	if err != nil {
-		impl.logger.Errorw("error in getting deployment config by appId and envId", "appId", appId, "envId", envId, "err", err)
-		return nil, err
-	}
-	return ConvertDeploymentConfigDbObjToDTO(config), nil
-}
-
+func (impl *DeploymentConfigServiceImpl) CheckIfURLAlreadyPresent(repoURL string) (bool, error) {
+	//TODO: optimisation
+	configs, err := impl.getAllAppLevelConfigsWithCustomGitOpsURL()
+	if err != nil {
+		impl.logger.Errorw("error in getting all configs", "err", err)
+		return false, err
+	}
+	for _, dc := range configs {
+		if dc.GetRepoURL() == repoURL {
+			impl.logger.Warnw("repository is already in use for helm app", "repoUrl", repoURL)
+			return true, nil
+		}
+	}
+	return false, nil
+}
 
-func (impl *DeploymentConfigServiceImpl) GetAndMigrateConfigIfAbsentForHelmApp(appId, envId int) (*bean.DeploymentConfig, error) {
-
-	helmDeploymentConfig, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId)
-	if err != nil && err != pg.ErrNoRows {
-		impl.logger.Errorw("error in fetching deployment config by appId and envId", "appId", appId, "envId", envId, "err", err)
-		return nil, err
-	}
-
-	if err == pg.ErrNoRows {
-		helmDeploymentConfig, err = impl.migrateHelmAppDataToDeploymentConfig(appId, envId)
-		if err != nil {
-			impl.logger.Errorw("error in migrating helm deployment config", "appId", appId, "envId", envId, "err", err)
-			return nil, err
-		}
-	}
-
-	if !impl.deploymentServiceTypeConfig.UseDeploymentConfigData {
-		configFromOldData, err := impl.parseConfigForHelmApps(appId, envId)
-		if err != nil {
-			impl.logger.Errorw("error in parsing config from charts and pipeline repository", "appId", appId, "envId", envId, "err", err)
-			return nil, err
-		}
-		return ConvertDeploymentConfigDbObjToDTO(configFromOldData), nil
-	}
-
-	return ConvertDeploymentConfigDbObjToDTO(helmDeploymentConfig), nil
-}
-
-func (impl *DeploymentConfigServiceImpl) migrateHelmAppDataToDeploymentConfig(appId, envId int) (*deploymentConfig.DeploymentConfig, error) {
-
-	helmDeploymentConfig, err := impl.parseConfigForHelmApps(appId, envId)
-	if err != nil {
-		impl.logger.Errorw("error in parsing deployment config for helm app", "appId", appId, "envId", envId, "err", err)
-		return helmDeploymentConfig, err
-	}
-
-	helmDeploymentConfig.CreateAuditLog(bean3.SYSTEM_USER_ID)
-	helmDeploymentConfig, err = impl.deploymentConfigRepository.Save(nil, helmDeploymentConfig)
-	if err != nil {
-		impl.logger.Errorw("error in saving deployment config for helm app", "appId", appId, "envId", envId, "err", err)
-		return nil, err
-	}
-	return helmDeploymentConfig, nil
-}
-
+func (impl *DeploymentConfigServiceImpl) FilterPipelinesByApplicationClusterIdAndNamespace(pipelines []pipelineConfig.Pipeline, applicationObjectClusterId int, applicationObjectNamespace string) (pipelineConfig.Pipeline, error) {
+	pipeline := pipelineConfig.Pipeline{}
+	for _, p := range pipelines {
+		dc, err := impl.GetConfigForDevtronApps(p.AppId, p.EnvironmentId)
+		if err != nil {
+			impl.logger.Errorw("error, GetConfigForDevtronApps", "appId", p.AppId, "environmentId", p.EnvironmentId, "err", err)
+			return pipeline, err
+		}
+		if dc.GetApplicationObjectClusterId() == applicationObjectClusterId &&
+			dc.GetApplicationObjectNamespace() == applicationObjectNamespace {
+			return p, nil
+		}
+	}
+	return pipeline, commonErr.PipelineNotFoundError
+}
+
+func (impl *DeploymentConfigServiceImpl) getConfigForHelmApps(appId int, envId int, migrateIfAbsent bool) (*bean.DeploymentConfig, error) {
+	var (
+		helmDeploymentConfig *bean.DeploymentConfig
+		isMigrationNeeded    bool
+	)
+	config, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId)
+	if err != nil && !errors.Is(err, pg.ErrNoRows) {
+		impl.logger.Errorw("error in fetching deployment config by appId and envId", "appId", appId, "envId", envId, "err", err)
+		return nil, err
+	} else if errors.Is(err, pg.ErrNoRows) {
+		isMigrationNeeded = true
+		helmDeploymentConfig, err = impl.parseDeploymentConfigForHelmApps(appId, envId)
+		if err != nil {
+			impl.logger.Errorw("error in parsing helm deployment config", "appId", appId, "envId", envId, "err", err)
+			return nil, err
+		}
+	} else {
+		helmDeploymentConfig, err = adapter.ConvertDeploymentConfigDbObjToDTO(config)
+		if err != nil {
+			impl.logger.Errorw("error in converting helm deployment config dbObj to DTO", "appId", appId, "envId", envId, "err", err)
+			return nil, err
+		}
+		if helmDeploymentConfig.ReleaseConfiguration == nil || len(helmDeploymentConfig.ReleaseConfiguration.Version) == 0 {
+			isMigrationNeeded = true
+			releaseConfig, err := impl.parseReleaseConfigForHelmApps(appId, envId, helmDeploymentConfig)
+			if err != nil {
+				impl.logger.Errorw("error in parsing release config", "appId", appId, "envId", envId, "err", err)
+				return nil, err
+			}
+			helmDeploymentConfig.ReleaseConfiguration = releaseConfig
+		}
+	}
+	if migrateIfAbsent && isMigrationNeeded {
+		_, err = impl.CreateOrUpdateConfig(nil, helmDeploymentConfig, bean3.SYSTEM_USER_ID)
+		if err != nil {
+			impl.logger.Errorw("error in creating helm deployment config", "appId", appId, "envId", envId, "err", err)
+			return nil, err
+		}
+	}
+	return helmDeploymentConfig, err
+}
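getConfigForHelmApps is the helm-app instance of the migrate-on-read scheme used throughout this change: read the row, synthesize anything missing from the legacy tables, and persist only when the MigrateDeploymentConfigData flag asked for it. A reduced, self-contained sketch of that control flow (the three func parameters stand in for the repository read, the legacy parse, and CreateOrUpdateConfig):

	// migrateOnRead mirrors the shape of getConfigForHelmApps above.
	func migrateOnRead(
		fetch func() (*bean.DeploymentConfig, error),
		parse func() (*bean.DeploymentConfig, error),
		persist func(*bean.DeploymentConfig) error,
		migrateIfAbsent bool,
	) (*bean.DeploymentConfig, error) {
		cfg, err := fetch()
		needsMigration := false
		if errors.Is(err, pg.ErrNoRows) {
			if cfg, err = parse(); err != nil { // no row yet: rebuild from legacy data
				return nil, err
			}
			needsMigration = true
		} else if err != nil {
			return nil, err
		}
		if migrateIfAbsent && needsMigration {
			if err = persist(cfg); err != nil { // backfill the new table on demand
				return nil, err
			}
		}
		return cfg, nil
	}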
 
-func (impl *DeploymentConfigServiceImpl) parseConfigForHelmApps(appId int, envId int) (*deploymentConfig.DeploymentConfig, error) {
+func (impl *DeploymentConfigServiceImpl) parseDeploymentConfigForHelmApps(appId int, envId int) (*bean.DeploymentConfig, error) {
 	installedApp, err := impl.installedAppReadService.GetInstalledAppsByAppId(appId)
 	if err != nil {
 		impl.logger.Errorw("error in getting installed app by appId", "appId", appId, "err", err)
@@ -462,100 +491,160 @@ func (impl *DeploymentConfigServiceImpl) parseConfigForHelmApps(appId int, envId
 	if installedApp.EnvironmentId != envId {
 		return nil, pg.ErrNoRows
 	}
-	helmDeploymentConfig := &deploymentConfig.DeploymentConfig{
+	helmDeploymentConfig := &bean.DeploymentConfig{
 		AppId:             appId,
 		EnvironmentId:     envId,
 		DeploymentAppType: installedApp.DeploymentAppType,
-		ConfigType:        GetDeploymentConfigType(installedApp.IsCustomRepository),
-		RepoUrl:           installedApp.GitOpsRepoUrl,
-		RepoName:          installedApp.GitOpsRepoName,
+		ConfigType:        adapter.GetDeploymentConfigType(installedApp.IsCustomRepository),
+		RepoURL:           installedApp.GitOpsRepoUrl,
 		Active:            true,
 	}
+	releaseConfig, err := impl.parseReleaseConfigForHelmApps(appId, envId, helmDeploymentConfig)
+	if err != nil {
+		return nil, err
+	}
+	helmDeploymentConfig.ReleaseConfiguration = releaseConfig
 	return helmDeploymentConfig, nil
 }
 
-func (impl *DeploymentConfigServiceImpl) parseFromOldTablesForDevtronApps(appId, envId int) (*bean.DeploymentConfig, error) {
-	appLevelConfig, err := impl.parseAppLevelConfigForDevtronApps(appId)
-	if err != nil {
-		impl.logger.Errorw("error in parsing charts data to deployment config", "appId", appId, "err", err)
-		return nil, err
-	}
-	if envId > 0 {
-		appAndEnvLevelConfig, err := impl.parseEnvLevelConfigForDevtronApps(appLevelConfig, appId, envId)
-		if err != nil {
-			impl.logger.Errorw("error in parsing env level config to deployment config", "appId", appId, "err", err)
-			return nil, err
-		}
-		return ConvertDeploymentConfigDbObjToDTO(appAndEnvLevelConfig), nil
-	}
-	return ConvertDeploymentConfigDbObjToDTO(appLevelConfig), nil
-}
-
+func (impl *DeploymentConfigServiceImpl) parseReleaseConfigForHelmApps(appId int, envId int, config *bean.DeploymentConfig) (*bean.ReleaseConfiguration, error) {
+	releaseConfig := &bean.ReleaseConfiguration{}
+	if config.DeploymentAppType == bean4.PIPELINE_DEPLOYMENT_TYPE_ACD {
+		releaseConfig.Version = bean.Version
+		app, err := impl.appRepository.FindById(appId)
+		if err != nil {
+			impl.logger.Errorw("error in getting app by id", "appId", appId, "err", err)
+			return nil, err
+		}
+		env, err := impl.environmentRepository.FindById(envId)
+		if err != nil {
+			impl.logger.Errorw("error in getting environment by id", "appId", appId, "envId", envId, "err", err)
+			return nil, err
+		}
+
+		var gitRepoURL string
+		if len(config.RepoURL) > 0 {
+			gitRepoURL = config.RepoURL
+		} else {
+			installedApp, err := impl.installedAppReadService.GetInstalledAppsByAppId(appId)
+			if err != nil {
+				impl.logger.Errorw("error in getting installed app by appId", "appId", appId, "err", err)
+				return nil, err
+			}
+			gitRepoURL = installedApp.GitOpsRepoUrl
+		}
+
+		releaseConfig = &bean.ReleaseConfiguration{
+			Version: bean.Version,
+			ArgoCDSpec: bean.ArgoCDSpec{
+				Metadata: bean.ApplicationMetadata{
+					ClusterId: clusterBean.DefaultClusterId,
+					Namespace: argocdServer.DevtronInstalationNs,
+				},
+				Spec: bean.ApplicationSpec{
+					Destination: &bean.Destination{
+						Namespace: env.Namespace,
+						Server:    commonBean.DefaultClusterUrl,
+					},
+					Source: &bean.ApplicationSource{
+						RepoURL: gitRepoURL,
+						Path:    util.BuildDeployedAppName(app.AppName, env.Name),
+						Helm: &bean.ApplicationSourceHelm{
+							ValueFiles: []string{"values.yaml"},
+						},
+						TargetRevision: util.GetDefaultTargetRevision(),
+					},
+				},
+			},
+		}
+	}
+	return releaseConfig, nil
+}
+
-func (impl *DeploymentConfigServiceImpl) GetAppLevelConfigForDevtronApp(appId int) (*bean.DeploymentConfig, error) {
-	appLevelConfigDbObj, err := impl.deploymentConfigRepository.GetAppLevelConfigForDevtronApps(appId)
-	if err != nil && err != pg.ErrNoRows {
-		impl.logger.Errorw("error in getting deployment config db object by appId", "appId", appId, "err", err)
-		return nil, err
-	}
-	if err == pg.ErrNoRows {
-		impl.logger.Infow("app level deployment config not found, migrating data from charts to deployment_config", "appId", appId, "err", err)
-		appLevelConfigDbObj, err = impl.migrateChartsDataToDeploymentConfig(appId)
-		if err != nil {
-			impl.logger.Errorw("error in migrating app level config to deployment config", "appId", appId, "err", err)
-			return nil, err
-		}
-	}
-	return ConvertDeploymentConfigDbObjToDTO(appLevelConfigDbObj), nil
-}
-
+func (impl *DeploymentConfigServiceImpl) getAllAppLevelConfigsWithCustomGitOpsURL() ([]*bean.DeploymentConfig, error) {
+	dbConfigs, err := impl.deploymentConfigRepository.GetAllConfigsForActiveApps()
+	if err != nil {
+		impl.logger.Errorw("error in getting all configs with custom gitops url", "err", err)
+		return nil, err
+	}
+	var configs []*bean.DeploymentConfig
+	for _, dbConfig := range dbConfigs {
+		config, err := adapter.ConvertDeploymentConfigDbObjToDTO(dbConfig)
+		if err != nil {
+			impl.logger.Errorw("error in converting dbObj to dto", "err", err)
+			return nil, err
+		}
+		configs = append(configs, config)
+	}
+	return configs, nil
+}
 
-func (impl *DeploymentConfigServiceImpl) UpdateRepoUrlForAppAndEnvId(repoURL string, appId, envId int) error {
-	err := impl.deploymentConfigRepository.UpdateRepoUrlByAppIdAndEnvId(repoURL, appId, envId)
+func (impl *DeploymentConfigServiceImpl) getAllEnvLevelConfigsForLinkedReleases() ([]*bean.DeploymentConfig, error) {
+	dbConfigs, err := impl.deploymentConfigRepository.GetAllEnvLevelConfigsWithReleaseMode(util2.PIPELINE_RELEASE_MODE_LINK)
 	if err != nil {
-		impl.logger.Errorw("error in updating repoUrl by app-id and env-id", "appId", appId, "envId", envId, "err", err)
-		return err
+		impl.logger.Errorw("error in getting all env level configs for linked releases", "err", err)
+		return nil, err
 	}
-	return nil
+	configs := make([]*bean.DeploymentConfig, 0)
+	for _, dbConfig := range dbConfigs {
+		config, err := adapter.ConvertDeploymentConfigDbObjToDTO(dbConfig)
+		if err != nil {
+			impl.logger.Errorw("error in converting dbObj to dto", "err", err)
+			return nil, err
+		}
+		configs = append(configs, config)
+	}
+	return configs, nil
 }
 
-func (impl *DeploymentConfigServiceImpl) GetDeploymentAppTypeForCDInBulk(pipelines []*pipelineConfig.Pipeline) (map[int]string, error) {
-	resp := make(map[int]string, len(pipelines)) //map of pipelineId and deploymentAppType
-	if impl.deploymentServiceTypeConfig.UseDeploymentConfigData {
-		appIdEnvIdMapping := make(map[int][]int, len(pipelines))
-		appIdEnvIdKeyPipelineIdMap := make(map[string]int, len(pipelines))
-		for _, pipeline := range pipelines {
-			appIdEnvIdMapping[pipeline.AppId] = append(appIdEnvIdMapping[pipeline.AppId], pipeline.EnvironmentId)
-			appIdEnvIdKeyPipelineIdMap[fmt.Sprintf("%d-%d", pipeline.AppId, pipeline.EnvironmentId)] = pipeline.Id
-		}
-		configs, err := impl.deploymentConfigRepository.GetAppAndEnvLevelConfigsInBulk(appIdEnvIdMapping)
+func (impl *DeploymentConfigServiceImpl) GetConfigDBObj(appId, envId int) (*deploymentConfig.DeploymentConfig, error) {
+	var configDbObj *deploymentConfig.DeploymentConfig
+	var err error
+	if envId == 0 {
+		configDbObj, err = impl.deploymentConfigRepository.GetAppLevelConfigForDevtronApps(appId)
 		if err != nil {
-			impl.logger.Errorw("error, GetAppAndEnvLevelConfigsInBulk", "appIdEnvIdMapping", appIdEnvIdMapping, "err", err)
+			impl.logger.Errorw("error in getting deployment config db object by appId", "appId", appId, "err", err)
 			return nil, err
 		}
-		for _, config := range configs {
-			pipelineId := appIdEnvIdKeyPipelineIdMap[fmt.Sprintf("%d-%d", config.AppId, config.EnvironmentId)]
-			resp[pipelineId] = config.DeploymentAppType
+	} else {
+		configDbObj, err = impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId)
+		if err != nil {
+			impl.logger.Errorw("error in getting deployment config db object by appId and envId", "appId", appId, "envId", envId, "err", err)
+			return nil, err
 		}
 	}
-	for _, pipeline := range pipelines {
-		if _, ok := resp[pipeline.Id]; !ok { //not found in map, either flag is disabled or config not migrated yet. Getting from old data
-			resp[pipeline.Id] = pipeline.DeploymentAppType
-		}
-	}
-	return resp, nil
-}
+	return configDbObj, nil
+}
+
+func (impl *DeploymentConfigServiceImpl) getAppLevelConfigForDevtronApps(appId int, migrateDataIfAbsent bool) (*bean.DeploymentConfig, error) {
+	appLevelConfig, isMigrationNeeded, err := impl.deploymentConfigReadService.GetDeploymentConfigForApp(appId)
+	if err != nil {
+		impl.logger.Errorw("error in getting app level config for devtron apps", "appId", appId, "err", err)
+		return nil, err
+	}
+	if migrateDataIfAbsent && isMigrationNeeded {
+		_, err := impl.CreateOrUpdateConfig(nil, appLevelConfig, bean3.SYSTEM_USER_ID)
+		if err != nil {
+			impl.logger.Errorw("error in migrating app level config to deployment config", "appId", appId, "err", err)
			return nil, err
+		}
+	}
+	return appLevelConfig, nil
+}
 
-func (impl *DeploymentConfigServiceImpl) GetConfigsByAppIds(appIds []int) ([]*bean.DeploymentConfig, error) {
-	if len(appIds) == 0 {
-		return nil, nil
-	}
-	configs, err := impl.deploymentConfigRepository.GetConfigByAppIds(appIds)
+func (impl *DeploymentConfigServiceImpl) getEnvLevelDataForDevtronApps(appId, envId int, appLevelConfig *bean.DeploymentConfig, migrateDataIfAbsent bool) (*bean.DeploymentConfig, error) {
+	envLevelConfig, isMigrationNeeded, err := impl.deploymentConfigReadService.GetDeploymentConfigForAppAndEnv(appLevelConfig, appId, envId)
 	if err != nil {
-		impl.logger.Errorw("error in getting deployment config db object by appIds", "appIds", appIds, "err", err)
+		impl.logger.Errorw("error in getting env level data for devtron apps", "appId", appId, "envId", envId, "appLevelConfig", appLevelConfig, "err", err)
 		return nil, err
 	}
-	resp := make([]*bean.DeploymentConfig, 0, len(configs))
-	for _, config := range configs {
-		resp = append(resp, ConvertDeploymentConfigDbObjToDTO(config))
+	if migrateDataIfAbsent && isMigrationNeeded {
+		_, err := impl.CreateOrUpdateConfig(nil, envLevelConfig, bean3.SYSTEM_USER_ID)
+		if err != nil {
+			impl.logger.Errorw("error in migrating env level config to deployment config", "appId", appId, "envId", envId, "err", err)
+			return nil, err
+		}
	}
-	return resp, nil
+	return envLevelConfig, nil
 }
diff --git a/pkg/deployment/common/errors/errors.go b/pkg/deployment/common/errors/errors.go
new file mode 100644
index 0000000000..b249869939
--- /dev/null
+++ b/pkg/deployment/common/errors/errors.go
@@ -0,0 +1,5 @@
+package errors
+
+import "errors"
+
+var PipelineNotFoundError = errors.New("pipeline not found")
diff --git a/pkg/deployment/common/helper.go b/pkg/deployment/common/helper.go
index 46e553146b..3225c59659 100644
--- a/pkg/deployment/common/helper.go
+++ b/pkg/deployment/common/helper.go
@@ -4,13 +4,6 @@ import (
 	"github.com/devtron-labs/devtron/pkg/deployment/common/bean"
 )
 
-func GetDeploymentConfigType(isCustomGitOpsRepo bool) string {
-	if isCustomGitOpsRepo {
-		return string(bean.CUSTOM)
-	}
-	return string(bean.SYSTEM_GENERATED)
-}
-
 func IsCustomGitOpsRepo(deploymentConfigType string) bool {
 	return deploymentConfigType == bean.CUSTOM.String()
 }
diff --git a/pkg/deployment/common/read/deploymentConfigReadService.go b/pkg/deployment/common/read/deploymentConfigReadService.go
index e7a97a29c9..f8c5d8254d 100644
--- a/pkg/deployment/common/read/deploymentConfigReadService.go
+++ b/pkg/deployment/common/read/deploymentConfigReadService.go
@@ -1,16 +1,413 @@
 package read
 
 import (
+	"fmt"
+	"github.com/devtron-labs/common-lib/utils/k8s/commonBean"
+	apiGitOpsBean "github.com/devtron-labs/devtron/api/bean/gitOps"
+	"github.com/devtron-labs/devtron/client/argocdServer"
+	"github.com/devtron-labs/devtron/internal/sql/repository/app"
 	"github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig"
+	"github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig"
+	internalUtil "github.com/devtron-labs/devtron/internal/util"
+	serviceBean "github.com/devtron-labs/devtron/pkg/bean"
+	chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository"
+	clusterBean "github.com/devtron-labs/devtron/pkg/cluster/bean"
+	"github.com/devtron-labs/devtron/pkg/cluster/environment/repository"
+	"github.com/devtron-labs/devtron/pkg/deployment/common/adapter"
 	"github.com/devtron-labs/devtron/pkg/deployment/common/bean"
+	"github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/read"
+	"github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/helper"
+	"github.com/devtron-labs/devtron/util"
+	"github.com/juju/errors"
 	"go.uber.org/zap"
+	"path/filepath"
 )
 
 type DeploymentConfigReadService interface {
-	GetByAppIdAndEnvId(appId, envId int) (*bean.DeploymentConfig, error)
+	GetDeploymentConfigMinForAppAndEnv(appId, envId int) (*bean.DeploymentConfigMin, error)
+	GetDeploymentAppTypeForCDInBulk(pipelines []*serviceBean.CDPipelineMinConfig, appIdToGitOpsConfiguredMap map[int]bool) (map[int]*bean.DeploymentConfigMin, error)
+
+	GetDeploymentConfigForApp(appId int) (*bean.DeploymentConfig, bool, error)
+	GetDeploymentConfigForAppAndEnv(appLevelConfig *bean.DeploymentConfig, appId, envId int) (*bean.DeploymentConfig, bool, error)
+	ParseEnvLevelReleaseConfigForDevtronApp(config *bean.DeploymentConfig, appId int, envId int) (*bean.ReleaseConfiguration, error)
 }
 
 type DeploymentConfigReadServiceImpl struct {
-	deploymentConfigRepository deploymentConfig.Repository
-	logger                     *zap.SugaredLogger
+	logger                      *zap.SugaredLogger
+	deploymentConfigRepository  deploymentConfig.Repository
+	deploymentServiceTypeConfig *util.DeploymentServiceTypeConfig
+	chartRepository             chartRepoRepository.ChartRepository
+	pipelineRepository          pipelineConfig.PipelineRepository
+	appRepository               app.AppRepository
+	environmentRepository       repository.EnvironmentRepository
+	envConfigOverrideService    read.EnvConfigOverrideService
+}
+
+func NewDeploymentConfigReadServiceImpl(logger *zap.SugaredLogger,
+	deploymentConfigRepository deploymentConfig.Repository,
+	envVariables *util.EnvironmentVariables,
+	chartRepository chartRepoRepository.ChartRepository,
+	pipelineRepository pipelineConfig.PipelineRepository,
+	appRepository app.AppRepository,
+	environmentRepository repository.EnvironmentRepository,
+	envConfigOverrideService read.EnvConfigOverrideService,
+) *DeploymentConfigReadServiceImpl {
+	return &DeploymentConfigReadServiceImpl{
+		logger:                      logger,
+		deploymentConfigRepository:  deploymentConfigRepository,
+		deploymentServiceTypeConfig: envVariables.DeploymentServiceTypeConfig,
+		chartRepository:             chartRepository,
+		pipelineRepository:          pipelineRepository,
+		appRepository:               appRepository,
+		environmentRepository:       environmentRepository,
+		envConfigOverrideService:    envConfigOverrideService,
+	}
+}
+
+func (impl *DeploymentConfigReadServiceImpl) GetDeploymentConfigMinForAppAndEnv(appId, envId int) (*bean.DeploymentConfigMin, error) {
+	deploymentDetail := &bean.DeploymentConfigMin{}
+	configBean, err := impl.getDeploymentConfigMinForAppAndEnv(appId, envId)
+	if err != nil && !internalUtil.IsErrNoRows(err) {
+		impl.logger.Errorw("error in getting deployment config by appId and envId", "appId", appId, "envId", envId, "err", err)
+		return deploymentDetail, err
+	} else if internalUtil.IsErrNoRows(err) {
+		// case: deployment config data not found at app level or env level;
+		// this means the deployment template is not yet created for this app and env
+		deploymentDetail.ReleaseMode = internalUtil.PIPELINE_RELEASE_MODE_CREATE
+		return deploymentDetail, nil
+	}
+	if configBean != nil {
+		deploymentDetail.DeploymentAppType = configBean.DeploymentAppType
+		deploymentDetail.ReleaseMode = configBean.ReleaseMode
+		deploymentDetail.GitRepoUrl = configBean.GetRepoURL()
+		deploymentDetail.IsGitOpsRepoConfigured = !apiGitOpsBean.IsGitOpsRepoNotConfigured(configBean.GetRepoURL())
+	}
+	return deploymentDetail, nil
+}
+
+func (impl *DeploymentConfigReadServiceImpl) GetDeploymentAppTypeForCDInBulk(pipelines []*serviceBean.CDPipelineMinConfig, appIdToGitOpsConfiguredMap map[int]bool) (map[int]*bean.DeploymentConfigMin, error) {
+	resp := make(map[int]*bean.DeploymentConfigMin, len(pipelines)) //map of pipelineId and deploymentAppType
+	appIdEnvIdMapping := make(map[int][]int, len(pipelines))
+	appIdEnvIdKeyPipelineIdMap := make(map[string]int, len(pipelines))
+	for _, pipeline := range pipelines {
+		appIdEnvIdMapping[pipeline.AppId] = append(appIdEnvIdMapping[pipeline.AppId], pipeline.EnvironmentId)
+		appIdEnvIdKeyPipelineIdMap[fmt.Sprintf("%d-%d", pipeline.AppId, pipeline.EnvironmentId)] = pipeline.Id
+	}
+	configs, err := impl.deploymentConfigRepository.GetAppAndEnvLevelConfigsInBulk(appIdEnvIdMapping)
+	if err != nil {
+		impl.logger.Errorw("error, GetAppAndEnvLevelConfigsInBulk", "appIdEnvIdMapping", appIdEnvIdMapping, "err", err)
+		return nil, err
+	}
+	for _, config := range configs {
+		configBean, err := adapter.ConvertDeploymentConfigDbObjToDTO(config)
+		if err != nil {
+			impl.logger.Errorw("error, ConvertDeploymentConfigDbObjToDTO", "config", config, "err", err)
+			return nil, err
+		}
+		pipelineId := appIdEnvIdKeyPipelineIdMap[fmt.Sprintf("%d-%d", configBean.AppId, configBean.EnvironmentId)]
+		isGitOpsRepoConfigured := configBean.IsPipelineGitOpsRepoConfigured(appIdToGitOpsConfiguredMap[configBean.AppId])
+		resp[pipelineId] = adapter.NewDeploymentConfigMin(configBean.DeploymentAppType, configBean.ReleaseMode, isGitOpsRepoConfigured)
+	}
+	for _, pipeline := range pipelines {
+		if _, ok := resp[pipeline.Id]; !ok {
+			isGitOpsRepoConfigured := appIdToGitOpsConfiguredMap[pipeline.AppId]
+			// not found in map, either flag is disabled or config not migrated yet; getting from old data
+			resp[pipeline.Id] = adapter.NewDeploymentConfigMin(pipeline.DeploymentAppType, internalUtil.PIPELINE_RELEASE_MODE_CREATE, isGitOpsRepoConfigured)
+		}
+	}
+	return resp, nil
+}
+
+func (impl *DeploymentConfigReadServiceImpl) GetDeploymentConfigForApp(appId int) (*bean.DeploymentConfig, bool, error) {
+	var (
+		appLevelConfig    *bean.DeploymentConfig
+		isMigrationNeeded bool
+	)
+	appLevelConfigDbObj, err := impl.deploymentConfigRepository.GetAppLevelConfigForDevtronApps(appId)
+	if err != nil && !internalUtil.IsErrNoRows(err) {
+		impl.logger.Errorw("error in getting deployment config db object by appId", "appId", appId, "err", err)
+		return appLevelConfig, isMigrationNeeded, err
+	} else if internalUtil.IsErrNoRows(err) {
+		isMigrationNeeded = true
+		appLevelConfig, err = impl.parseAppLevelMigrationDataForDevtronApps(appId)
+		if err != nil {
+			impl.logger.Errorw("error in migrating app level config to deployment config", "appId", appId, "err", err)
+			return appLevelConfig, isMigrationNeeded, err
+		}
+	} else {
+		appLevelConfig, err = adapter.ConvertDeploymentConfigDbObjToDTO(appLevelConfigDbObj)
+		if err != nil {
+			impl.logger.Errorw("error in converting deployment config db object", "appId", appId, "err", err)
+			return appLevelConfig, isMigrationNeeded, err
+		}
+		if appLevelConfig.ReleaseConfiguration == nil || len(appLevelConfig.ReleaseConfiguration.Version) == 0 {
+			isMigrationNeeded = true
+			releaseConfig, err := impl.parseAppLevelReleaseConfigForDevtronApp(appId, appLevelConfig)
+			if err != nil {
+				impl.logger.Errorw("error in parsing release configuration for app", "appId", appId, "err", err)
+				return appLevelConfig, isMigrationNeeded, err
+			}
+			appLevelConfig.ReleaseConfiguration = releaseConfig
+		}
+	}
+	return appLevelConfig, isMigrationNeeded, nil
+}
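Both read paths return an isMigrationNeeded flag instead of writing the backfilled row themselves, which keeps this read service free of side effects; the write service decides whether to persist. The caller shape, mirroring getAppLevelConfigForDevtronApps in the service earlier in this diff (readSvc/writeSvc are stand-ins for the wired dependencies):

	cfg, needsMigration, err := readSvc.GetDeploymentConfigForApp(appId)
	if err != nil {
		return nil, err
	}
	if migrateDataIfAbsent && needsMigration {
		// the system user authors the backfill, matching bean3.SYSTEM_USER_ID above
		if _, err := writeSvc.CreateOrUpdateConfig(nil, cfg, bean3.SYSTEM_USER_ID); err != nil {
			return nil, err
		}
	}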
+ if envLevelConfig.ReleaseConfiguration == nil || len(envLevelConfig.ReleaseConfiguration.Version) == 0 { + isMigrationNeeded = true + releaseConfig, err := impl.ParseEnvLevelReleaseConfigForDevtronApp(envLevelConfig, appId, envId) + if err != nil { + impl.logger.Errorw("error in parsing env level release config", "appId", appId, "envId", envId, "err", err) + return envLevelConfig, isMigrationNeeded, err + } + envLevelConfig.ReleaseConfiguration = releaseConfig + } + } + var isRepoUrlUpdated bool + envLevelConfig, isRepoUrlUpdated, err = impl.configureEnvURLByAppURLIfNotConfigured(envLevelConfig, appLevelConfig.GetRepoURL()) + if err != nil { + impl.logger.Errorw("error in configuring env level url with app url", "appId", appId, "envId", envId, "err", err) + return envLevelConfig, isMigrationNeeded, err + } + if isRepoUrlUpdated { + isMigrationNeeded = true + } + return envLevelConfig, isMigrationNeeded, nil +} + +func (impl *DeploymentConfigReadServiceImpl) getDeploymentConfigMinForAppAndEnv(appId, envId int) (*bean.DeploymentConfig, error) { + appLevelConfig, err := impl.getAppLevelConfigForDevtronApps(appId) + if err != nil { + impl.logger.Errorw("error in getting app level config for devtron apps", "appId", appId, "envId", envId, "err", err) + return nil, err + } + + if envId > 0 { + // if envId > 0 then only env level config will be returned, + // for getting app level config envId should be zero + envLevelConfig, err := impl.getEnvLevelDataForDevtronApps(appId, envId, appLevelConfig) + if err != nil { + impl.logger.Errorw("error in getting env level data for devtron apps", "appId", appId, "envId", envId, "err", err) + return nil, err + } + return envLevelConfig, nil + } + return appLevelConfig, nil +} + +func (impl *DeploymentConfigReadServiceImpl) getAppLevelConfigForDevtronApps(appId int) (*bean.DeploymentConfig, error) { + appLevelConfig, _, err := impl.GetDeploymentConfigForApp(appId) + if err != nil { + impl.logger.Errorw("error in getting app level Config for devtron apps", "appId", appId, "err", err) + return nil, err + } + return appLevelConfig, nil +} + +func (impl *DeploymentConfigReadServiceImpl) getEnvLevelDataForDevtronApps(appId, envId int, appLevelConfig *bean.DeploymentConfig) (*bean.DeploymentConfig, error) { + envLevelConfig, _, err := impl.GetDeploymentConfigForAppAndEnv(appLevelConfig, appId, envId) + if err != nil { + impl.logger.Errorw("error in getting env level data for devtron apps", "appId", appId, "envId", envId, "appLevelConfig", appLevelConfig, "err", err) + return nil, err + } + return envLevelConfig, nil +} + +func (impl *DeploymentConfigReadServiceImpl) configureEnvURLByAppURLIfNotConfigured(appAndEnvLevelConfig *bean.DeploymentConfig, appLevelURL string) (*bean.DeploymentConfig, bool, error) { + /* + if custom gitOps is configured in repo + and app is cloned then cloned pipelines repo URL=NOT_CONFIGURED . + In this case User manually configures repoURL. The configured repo_url is saved in app level config but is absent + in env level config. 
+ */ + var isRepoUrlUpdated bool + if apiGitOpsBean.IsGitOpsRepoNotConfigured(appAndEnvLevelConfig.GetRepoURL()) && + apiGitOpsBean.IsGitOpsRepoConfigured(appLevelURL) { + // if url is present at app level and not at env level then copy app level url to env level config + appAndEnvLevelConfig.SetRepoURL(appLevelURL) + // if url is updated then set isRepoUrlUpdated = true + isRepoUrlUpdated = true + return appAndEnvLevelConfig, isRepoUrlUpdated, nil + } + return appAndEnvLevelConfig, isRepoUrlUpdated, nil +} + +func (impl *DeploymentConfigReadServiceImpl) parseEnvLevelMigrationDataForDevtronApps(appLevelConfig *bean.DeploymentConfig, appId, envId int) (*bean.DeploymentConfig, error) { + /* + We can safely assume that no linked argoCD pipeline is created if migration is happening. + In the migration case, default values for the below fields will be => + 1) repoUrl => same as app level url + 2) chartLocation => we should fetch the active envConfigOverride and use the chart path from that + 3) valuesFile => _<environmentId>-values.yaml + 4) branch => master + 5) releaseMode => create + 6) Default ClusterId for application object => 1 + 7) Default Namespace for application object => devtroncd + */ + config := &bean.DeploymentConfig{ + AppId: appId, + EnvironmentId: envId, + ConfigType: appLevelConfig.ConfigType, + ReleaseMode: interalUtil.PIPELINE_RELEASE_MODE_CREATE, + Active: true, + } + + deploymentAppType, err := impl.pipelineRepository.FindDeploymentAppTypeByAppIdAndEnvId(appId, envId) + if err != nil { + impl.logger.Errorw("error in getting deployment app type by appId and envId", "appId", appId, "envId", envId, "err", err) + return nil, err + } + config.DeploymentAppType = deploymentAppType + + releaseConfig, err := impl.ParseEnvLevelReleaseConfigForDevtronApp(config, appId, envId) + if err != nil { + impl.logger.Errorw("error in parsing env level release config", "appId", appId, "envId", envId, "err", err) + return nil, err + } + config.ReleaseConfiguration = releaseConfig + + config.RepoURL = config.GetRepoURL() //for backward compatibility + + return config, nil +} + +func (impl *DeploymentConfigReadServiceImpl) getConfigMetaDataForAppAndEnv(appId int, envId int) (environmentId int, deploymentAppName, namespace string, err error) { + pipelineModels, err := impl.pipelineRepository.FindActiveByAppIdAndEnvironmentId(appId, envId) + if err != nil { + impl.logger.Errorw("error in getting active pipelines by appId and envId", "appId", appId, "envId", envId, "err", err) + return environmentId, deploymentAppName, namespace, err + } else if len(pipelineModels) > 1 { + impl.logger.Errorw("error, multiple pipelines found for app and env", "appId", appId, "envId", envId, "pipelineModels", pipelineModels) + return environmentId, deploymentAppName, namespace, errors.New("multiple pipelines found for app and env") + } else if len(pipelineModels) == 0 { + appModel, err := impl.appRepository.FindById(appId) + if err != nil { + impl.logger.Errorw("error in fetching app", "appId", appId, "err", err) + return environmentId, deploymentAppName, namespace, err + } + envModel, err := impl.environmentRepository.FindById(envId) + if err != nil { + impl.logger.Errorw("error in finding environment by id", "envId", envId, "err", err) + return environmentId, deploymentAppName, namespace, err + } + deploymentAppName = util.BuildDeployedAppName(appModel.AppName, envModel.Name) + environmentId = envModel.Id + namespace = envModel.Namespace + } else { + pipelineModel := pipelineModels[0] + deploymentAppName = pipelineModel.DeploymentAppName +
environmentId = pipelineModel.EnvironmentId + namespace = pipelineModel.Environment.Namespace + } + return environmentId, deploymentAppName, namespace, nil +} + +func (impl *DeploymentConfigReadServiceImpl) ParseEnvLevelReleaseConfigForDevtronApp(config *bean.DeploymentConfig, appId int, envId int) (*bean.ReleaseConfiguration, error) { + releaseConfig := &bean.ReleaseConfiguration{} + if config.DeploymentAppType == interalUtil.PIPELINE_DEPLOYMENT_TYPE_ACD { + releaseConfig.Version = bean.Version + envOverride, err := impl.envConfigOverrideService.FindLatestChartForAppByAppIdAndEnvId(appId, envId) + if err != nil && !errors.IsNotFound(err) { + impl.logger.Errorw("error in fetching latest chart env override", "appId", appId, "envId", envId, "err", err) + return nil, err + } + var latestChart *chartRepoRepository.Chart + if !envOverride.IsOverridden() { + latestChart, err = impl.chartRepository.FindLatestChartForAppByAppId(appId) + if err != nil { + return nil, err + } + } else { + // if the chart is overridden in the env, it may have a different version than the app level chart. + latestChart = envOverride.Chart + } + gitRepoUrl := latestChart.GitRepoUrl + if len(config.RepoURL) > 0 { + gitRepoUrl = config.RepoURL + } + environmentId, deploymentAppName, namespace, err := impl.getConfigMetaDataForAppAndEnv(appId, envId) + if err != nil { + impl.logger.Errorw("error in getting config meta data for app and env", "appId", appId, "envId", envId, "err", err) + return nil, err + } + releaseConfig.ArgoCDSpec = bean.ArgoCDSpec{ + Metadata: bean.ApplicationMetadata{ + Name: deploymentAppName, + ClusterId: clusterBean.DefaultClusterId, + Namespace: argocdServer.DevtronInstalationNs, + }, + Spec: bean.ApplicationSpec{ + Source: &bean.ApplicationSource{ + RepoURL: gitRepoUrl, + Path: latestChart.ChartLocation, + Helm: &bean.ApplicationSourceHelm{ + ValueFiles: []string{helper.GetValuesFileForEnv(environmentId)}, + }, + TargetRevision: util.GetDefaultTargetRevision(), + }, + Destination: &bean.Destination{ + Namespace: namespace, + Server: commonBean.DefaultClusterUrl, + }, + }, + } + } + return releaseConfig, nil +} + +func (impl *DeploymentConfigReadServiceImpl) parseAppLevelMigrationDataForDevtronApps(appId int) (*bean.DeploymentConfig, error) { + chart, err := impl.chartRepository.FindLatestChartForAppByAppId(appId) + if err != nil { + return nil, err + } + + chartLocation := filepath.Join(chart.ReferenceTemplate, chart.ChartVersion) + releaseConfig := adapter.NewAppLevelReleaseConfigFromChart(chart.GitRepoUrl, chartLocation) + config := &bean.DeploymentConfig{ + AppId: appId, + ConfigType: adapter.GetDeploymentConfigType(chart.IsCustomGitRepository), + Active: true, + RepoURL: chart.GitRepoUrl, //for backward compatibility + ReleaseConfiguration: releaseConfig, + } + return config, nil +} + +func (impl *DeploymentConfigReadServiceImpl) parseAppLevelReleaseConfigForDevtronApp(appId int, appLevelConfig *bean.DeploymentConfig) (*bean.ReleaseConfiguration, error) { + chart, err := impl.chartRepository.FindLatestChartForAppByAppId(appId) + if err != nil { + return nil, err + } + + repoURL := chart.GitRepoUrl + if len(appLevelConfig.RepoURL) > 0 { + repoURL = appLevelConfig.RepoURL + } + chartLocation := filepath.Join(chart.ReferenceTemplate, chart.ChartVersion) + releaseConfig := adapter.NewAppLevelReleaseConfigFromChart(repoURL, chartLocation) + return releaseConfig, nil +} diff --git a/pkg/deployment/common/wire_deploymentConfig.go b/pkg/deployment/common/wire_deploymentConfig.go new file mode 100644 index 0000000000..9980825f0f --- /dev/null +++
b/pkg/deployment/common/wire_deploymentConfig.go @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2020-2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +import ( + "github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig" + "github.com/devtron-labs/devtron/pkg/deployment/common/read" + "github.com/google/wire" +) + +var WireSet = wire.NewSet( + deploymentConfig.NewRepositoryImpl, + wire.Bind(new(deploymentConfig.Repository), new(*deploymentConfig.RepositoryImpl)), + + read.NewDeploymentConfigReadServiceImpl, + wire.Bind(new(read.DeploymentConfigReadService), new(*read.DeploymentConfigReadServiceImpl)), + + NewDeploymentConfigServiceImpl, + wire.Bind(new(DeploymentConfigService), new(*DeploymentConfigServiceImpl)), +) diff --git a/pkg/deployment/deployedApp/status/resourceTree/ResourceTreeService.go b/pkg/deployment/deployedApp/status/resourceTree/ResourceTreeService.go index 14046cebbf..b6fd6b5f69 100644 --- a/pkg/deployment/deployedApp/status/resourceTree/ResourceTreeService.go +++ b/pkg/deployment/deployedApp/status/resourceTree/ResourceTreeService.go @@ -35,6 +35,8 @@ import ( "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/appStatus" argoApplication2 "github.com/devtron-labs/devtron/pkg/argoApplication" + bean2 "github.com/devtron-labs/devtron/pkg/argoApplication/bean" + read2 "github.com/devtron-labs/devtron/pkg/cluster/environment/read" commonBean "github.com/devtron-labs/devtron/pkg/deployment/common/bean" "github.com/devtron-labs/devtron/pkg/k8s" application2 "github.com/devtron-labs/devtron/pkg/k8s/application" @@ -63,6 +65,7 @@ type ServiceImpl struct { helmAppService service.HelmAppService k8sApplicationService application2.K8sApplicationService k8sCommonService k8s.K8sCommonService + environmentReadService read2.EnvironmentReadService } func NewServiceImpl(logger *zap.SugaredLogger, @@ -73,7 +76,9 @@ func NewServiceImpl(logger *zap.SugaredLogger, helmAppReadService read.HelmAppReadService, helmAppService service.HelmAppService, k8sApplicationService application2.K8sApplicationService, - k8sCommonService k8s.K8sCommonService) *ServiceImpl { + k8sCommonService k8s.K8sCommonService, + environmentReadService read2.EnvironmentReadService, +) *ServiceImpl { serviceImpl := &ServiceImpl{ logger: logger, appListingService: appListingService, @@ -84,6 +89,7 @@ func NewServiceImpl(logger *zap.SugaredLogger, helmAppService: helmAppService, k8sApplicationService: k8sApplicationService, k8sCommonService: k8sCommonService, + environmentReadService: environmentReadService, } return serviceImpl } @@ -102,7 +108,25 @@ func (impl *ServiceImpl) FetchResourceTree(ctx context.Context, appId int, envId ApplicationName: &cdPipeline.DeploymentAppName, } start := time.Now() - resp, err := 
impl.argoApplicationService.ResourceTree(ctx, query) + acdQueryRequest := bean2.NewImperativeQueryRequest(query) + if deploymentConfig.IsLinkedRelease() { + if argocdAppNamespace := deploymentConfig.GetApplicationObjectNamespace(); argocdAppNamespace != "" { + query.AppNamespace = &argocdAppNamespace + } + targetClusterId := cdPipeline.Environment.ClusterId + if targetClusterId == 0 { + clusterId, err := impl.environmentReadService.GetClusterIdByEnvId(cdPipeline.EnvironmentId) + if err != nil && !util.IsErrNoRows(err) { + impl.logger.Errorw("error in fetching cluster id by env id", "envId", cdPipeline.EnvironmentId, "err", err) + return resourceTree, err + } + targetClusterId = clusterId + } + acdQueryRequest = bean2.NewDeclarativeQueryRequest(query). + WithArgoClusterId(deploymentConfig.GetApplicationObjectClusterId()). + WithTargetClusterId(targetClusterId) + } + resp, err := impl.argoApplicationService.GetResourceTree(ctx, acdQueryRequest) elapsed := time.Since(start) impl.logger.Debugw("FetchAppDetailsV2, time elapsed in fetching application for environment ", "elapsed", elapsed, "appId", appId, "envId", envId) if err != nil { @@ -139,14 +163,6 @@ func (impl *ServiceImpl) FetchResourceTree(ctx context.Context, appId int, envId resp.Status = argoApplication.HIBERNATING } } - if resp.Status == string(health.HealthStatusDegraded) { - count, err := impl.appListingService.GetReleaseCount(appId, envId) - if err != nil { - impl.logger.Errorw("service err, FetchAppDetailsV2, release count", "err", err, "app", appId, "env", envId) - } else if count == 0 { - resp.Status = app.NotDeployed - } - } resourceTree = util2.InterfaceToMapAdapter(resp) go func() { if resp.Status == string(health.HealthStatusHealthy) { diff --git a/pkg/deployment/gitOps/adapter/adapter.go b/pkg/deployment/gitOps/adapter/adapter.go new file mode 100644 index 0000000000..d16489bc69 --- /dev/null +++ b/pkg/deployment/gitOps/adapter/adapter.go @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package adapter + +import ( + apiBean "github.com/devtron-labs/devtron/api/bean" + apiGitOpsBean "github.com/devtron-labs/devtron/api/bean/gitOps" + "github.com/devtron-labs/devtron/internal/sql/repository" +) + +func GetGitOpsConfigBean(model *repository.GitOpsConfig) *apiGitOpsBean.GitOpsConfigDto { + return &apiGitOpsBean.GitOpsConfigDto{ + Id: model.Id, + Provider: model.Provider, + GitHubOrgId: model.GitHubOrgId, + GitLabGroupId: model.GitLabGroupId, + Active: model.Active, + Token: model.Token, + Host: model.Host, + Username: model.Username, + UserId: model.CreatedBy, + AzureProjectName: model.AzureProject, + BitBucketWorkspaceId: model.BitBucketWorkspaceId, + BitBucketProjectKey: model.BitBucketProjectKey, + AllowCustomRepository: model.AllowCustomRepository, + EnableTLSVerification: true, + TLSConfig: &apiBean.TLSConfig{ + CaData: model.CaCert, + TLSCertData: model.TlsCert, + TLSKeyData: model.TlsKey, + }, + } +} diff --git a/pkg/deployment/gitOps/common/bean/bean.go b/pkg/deployment/gitOps/common/bean/bean.go index 94897e3346..0eff939e45 100644 --- a/pkg/deployment/gitOps/common/bean/bean.go +++ b/pkg/deployment/gitOps/common/bean/bean.go @@ -18,7 +18,7 @@ package bean // TODO : rename type ChartGitAttribute struct { - RepoUrl, ChartLocation string + RepoUrl, ChartLocation, TargetRevision string IsNewRepo bool IsRepoEmpty bool } diff --git a/pkg/deployment/gitOps/config/GitOpsConfigReadService.go b/pkg/deployment/gitOps/config/GitOpsConfigReadService.go index b4828fec12..22617b6ec9 100644 --- a/pkg/deployment/gitOps/config/GitOpsConfigReadService.go +++ b/pkg/deployment/gitOps/config/GitOpsConfigReadService.go @@ -21,13 +21,20 @@ import ( "fmt" bean3 "github.com/devtron-labs/devtron/api/bean" bean2 "github.com/devtron-labs/devtron/api/bean/gitOps" + "github.com/devtron-labs/devtron/internal/constants" "github.com/devtron-labs/devtron/internal/sql/repository" + internalUtil "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/auth/user" + "github.com/devtron-labs/devtron/pkg/deployment/gitOps/adapter" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config/bean" + moduleBean "github.com/devtron-labs/devtron/pkg/module/bean" + moduleRead "github.com/devtron-labs/devtron/pkg/module/read" + moduleErr "github.com/devtron-labs/devtron/pkg/module/read/error" "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/gitUtil" "github.com/go-pg/pg" "go.uber.org/zap" + "net/http" "regexp" "strings" ) @@ -41,7 +48,6 @@ GetGitOpsConfigActive() (*bean2.GitOpsConfigDto, error) GetConfiguredGitOpsCount() (int, error) GetGitOpsProviderByRepoURL(gitRepoUrl string) (*bean2.GitOpsConfigDto, error) - GetGitOpsProviderMapByRepoURL(allGitRepoUrls []string) (map[string]*bean2.GitOpsConfigDto, error) GetGitOpsById(id int) (*bean2.GitOpsConfigDto, error) } @@ -50,22 +56,31 @@ type GitOpsConfigReadServiceImpl struct { logger *zap.SugaredLogger gitOpsRepository repository.GitOpsConfigRepository userService user.UserService globalEnvVariables *util.GlobalEnvVariables + moduleReadService moduleRead.ModuleReadService } func NewGitOpsConfigReadServiceImpl(logger *zap.SugaredLogger, gitOpsRepository repository.GitOpsConfigRepository, userService
user.UserService, - envVariables *util.EnvironmentVariables) *GitOpsConfigReadServiceImpl { + envVariables *util.EnvironmentVariables, + moduleReadService moduleRead.ModuleReadService) *GitOpsConfigReadServiceImpl { return &GitOpsConfigReadServiceImpl{ logger: logger, gitOpsRepository: gitOpsRepository, userService: userService, globalEnvVariables: envVariables.GlobalEnvVariables, + moduleReadService: moduleReadService, } } func (impl *GitOpsConfigReadServiceImpl) IsGitOpsConfigured() (*bean.GitOpsConfigurationStatus, error) { gitOpsConfigurationStatus := &bean.GitOpsConfigurationStatus{} + argoModule, err := impl.moduleReadService.GetModuleInfoByName(moduleBean.ModuleNameArgoCd) + if err != nil && !errors.Is(err, moduleErr.ModuleNotFoundError) { + impl.logger.Errorw("error in getting argo module", "error", err) + return gitOpsConfigurationStatus, err + } + gitOpsConfigurationStatus.IsArgoCdInstalled = argoModule.IsInstalled() gitOpsConfig, err := impl.gitOpsRepository.GetGitOpsConfigActive() if err != nil && !errors.Is(err, pg.ErrNoRows) { impl.logger.Errorw("GetGitOpsConfigActive, error while getting", "err", err) @@ -141,28 +156,7 @@ func (impl *GitOpsConfigReadServiceImpl) GetGitOpsConfigActive() (*bean2.GitOpsC impl.logger.Errorw("error, GetGitOpsConfigActive", "err", err) return nil, err } - config := &bean2.GitOpsConfigDto{ - Id: model.Id, - Provider: model.Provider, - GitHubOrgId: model.GitHubOrgId, - GitLabGroupId: model.GitLabGroupId, - Active: model.Active, - Token: model.Token, - Host: model.Host, - Username: model.Username, - UserId: model.CreatedBy, - AzureProjectName: model.AzureProject, - BitBucketWorkspaceId: model.BitBucketWorkspaceId, - BitBucketProjectKey: model.BitBucketProjectKey, - AllowCustomRepository: model.AllowCustomRepository, - EnableTLSVerification: true, - TLSConfig: &bean3.TLSConfig{ - CaData: model.CaCert, - TLSCertData: model.TlsCert, - TLSKeyData: model.TlsKey, - }, - } - return config, err + return adapter.GetGitOpsConfigBean(model), err } func (impl *GitOpsConfigReadServiceImpl) GetConfiguredGitOpsCount() (int, error) { @@ -187,104 +181,28 @@ func (impl *GitOpsConfigReadServiceImpl) GetGitOpsProviderByRepoURL(gitRepoUrl s return nil, err } - var gitOpsConfig *bean2.GitOpsConfigDto - - requestHost, err := util.GetHost(gitRepoUrl) + inputHostURL, inputScheme, err := util.GetHost(gitRepoUrl) if err != nil { return nil, fmt.Errorf("unable to parse host from repo URL: %s", gitRepoUrl) } for _, model := range models { - host, err := util.GetHost(model.Host) + configBean := adapter.GetGitOpsConfigBean(model) + hostURL, scheme, err := util.GetHost(configBean.GetHostUrl()) if err != nil { return nil, fmt.Errorf("unable to parse host from repo URL: %s", gitRepoUrl) } - if host == requestHost { - gitOpsConfig = &bean2.GitOpsConfigDto{ - Id: model.Id, - Provider: model.Provider, - GitHubOrgId: model.GitHubOrgId, - GitLabGroupId: model.GitLabGroupId, - Active: model.Active, - Token: model.Token, - Host: model.Host, - Username: model.Username, - UserId: model.CreatedBy, - AzureProjectName: model.AzureProject, - BitBucketWorkspaceId: model.BitBucketWorkspaceId, - BitBucketProjectKey: model.BitBucketProjectKey, - AllowCustomRepository: model.AllowCustomRepository, - } + if len(hostURL) > 0 && len(inputHostURL) > 0 && + strings.HasPrefix(inputHostURL, hostURL) && + scheme == inputScheme { // written with assumption that only one GitOpsConfig is present in DB for each provider(github, gitlab, etc) - break - } - } - if gitOpsConfig == nil { - return nil, fmt.Errorf("no 
gitops config found in DB for given repoURL: %s", gitRepoUrl) - } - - return gitOpsConfig, nil -} - -func (impl *GitOpsConfigReadServiceImpl) GetGitOpsProviderMapByRepoURL(allGitRepoUrls []string) (map[string]*bean2.GitOpsConfigDto, error) { - - models, err := impl.gitOpsRepository.GetAllGitOpsConfig() - if err != nil { - impl.logger.Errorw("error, GetGitOpsConfigActive", "err", err) - return nil, err - } - - modelHostToConfigMapping := make(map[string]*bean2.GitOpsConfigDto) - for _, model := range models { - host, err := util.GetHost(model.Host) - if err != nil { - return nil, fmt.Errorf("unable to parse host from repo URL: %s", model.Host) - } - gitOpsConfig := &bean2.GitOpsConfigDto{ - Id: model.Id, - Provider: model.Provider, - GitHubOrgId: model.GitHubOrgId, - GitLabGroupId: model.GitLabGroupId, - Active: model.Active, - Token: model.Token, - Host: model.Host, - Username: model.Username, - UserId: model.CreatedBy, - AzureProjectName: model.AzureProject, - BitBucketWorkspaceId: model.BitBucketWorkspaceId, - BitBucketProjectKey: model.BitBucketProjectKey, - AllowCustomRepository: model.AllowCustomRepository, - } - modelHostToConfigMapping[host] = gitOpsConfig - } - - activeConfig, err := impl.GetGitOpsConfigActive() - if err != nil { - impl.logger.Errorw("error in getting active gitOps config", "err", err) - return nil, err - } - - repoUrlTOConfigMap := make(map[string]*bean2.GitOpsConfigDto) - - for _, gitRepoUrl := range allGitRepoUrls { - if gitRepoUrl == bean2.GIT_REPO_NOT_CONFIGURED { - repoUrlTOConfigMap[gitRepoUrl] = activeConfig - continue - } - requestHost, err := util.GetHost(gitRepoUrl) - if err != nil { - return nil, fmt.Errorf("unable to parse host from repo URL: %s", gitRepoUrl) - } - - if config, ok := modelHostToConfigMapping[requestHost]; ok { - repoUrlTOConfigMap[gitRepoUrl] = config - } else if !ok { - impl.logger.Infow("no gitops config found in DB for given url", "repoURL", gitRepoUrl) - repoUrlTOConfigMap[gitRepoUrl] = activeConfig // default behaviour + return configBean, nil } } - return repoUrlTOConfigMap, nil + errMsg := fmt.Sprintf("no gitops config found in DB for given repository: %q", gitRepoUrl) + return nil, internalUtil.NewApiError(http.StatusBadRequest, errMsg, errMsg). 
+ WithCode(constants.InvalidGitOpsRepoUrlForPipeline) } func (impl *GitOpsConfigReadServiceImpl) GetGitOpsById(id int) (*bean2.GitOpsConfigDto, error) { diff --git a/pkg/deployment/gitOps/config/bean/bean.go b/pkg/deployment/gitOps/config/bean/bean.go index 670981949c..c140bc77f7 100644 --- a/pkg/deployment/gitOps/config/bean/bean.go +++ b/pkg/deployment/gitOps/config/bean/bean.go @@ -31,6 +31,11 @@ const BITBUCKET_PROVIDER = "BITBUCKET_CLOUD" type GitOpsConfigurationStatus struct { IsGitOpsConfigured bool + IsArgoCdInstalled bool AllowCustomRepository bool Provider string } + +func (g *GitOpsConfigurationStatus) IsGitOpsConfiguredAndArgoCdInstalled() bool { + return g.IsGitOpsConfigured && g.IsArgoCdInstalled +} diff --git a/pkg/deployment/gitOps/git/GitOperationService.go b/pkg/deployment/gitOps/git/GitOperationService.go index 1b3a8eea81..a9b4e1c820 100644 --- a/pkg/deployment/gitOps/git/GitOperationService.go +++ b/pkg/deployment/gitOps/git/GitOperationService.go @@ -42,18 +42,18 @@ import ( ) type GitOperationService interface { - CreateGitRepositoryForDevtronApp(ctx context.Context, gitOpsRepoName string, userId int32) (chartGitAttribute *commonBean.ChartGitAttribute, err error) - CreateReadmeInGitRepo(ctx context.Context, gitOpsRepoName string, userId int32) error - GitPull(clonedDir string, repoUrl string) error + CreateGitRepositoryForDevtronApp(ctx context.Context, gitOpsRepoName string, targetRevision string, userId int32) (chartGitAttribute *commonBean.ChartGitAttribute, err error) + CreateReadmeInGitRepo(ctx context.Context, gitOpsRepoName string, targetRevision string, userId int32) error + GitPull(clonedDir string, repoUrl string, targetRevision string) error CommitValues(ctx context.Context, chartGitAttr *ChartConfig) (commitHash string, commitTime time.Time, err error) - PushChartToGitRepo(ctx context.Context, gitOpsRepoName, referenceTemplate, version, tempReferenceTemplateDir, repoUrl string, userId int32) (err error) - PushChartToGitOpsRepoForHelmApp(ctx context.Context, PushChartToGitRequest *bean.PushChartToGitRequestDTO, requirementsConfig *ChartConfig, valuesConfig *ChartConfig) (*commonBean.ChartGitAttribute, string, error) + PushChartToGitRepo(ctx context.Context, gitOpsRepoName, chartLocation, tempReferenceTemplateDir, repoUrl, targetRevision string, userId int32) (err error) + PushChartToGitOpsRepoForHelmApp(ctx context.Context, pushChartToGitRequest *bean.PushChartToGitRequestDTO, requirementsConfig, valuesConfig *ChartConfig) (*commonBean.ChartGitAttribute, string, error) CreateRepository(ctx context.Context, dto *apiBean.GitOpsConfigDto, userId int32) (string, bool, bool, error) GetRepoUrlByRepoName(repoName string) (string, error) - CloneInDir(repoUrl, chartDir string) (string, error) + GetClonedDir(ctx context.Context, chartDir, repoUrl, targetRevision string) (string, error) ReloadGitOpsProvider() error UpdateGitHostUrlByProvider(request *apiBean.GitOpsConfigDto) error GetRepoUrlWithUserName(url string) (string, error) @@ -81,8 +81,8 @@ func NewGitOperationServiceImpl(logger *zap.SugaredLogger, gitFactory *GitFactor } -func (impl *GitOperationServiceImpl) CreateGitRepositoryForDevtronApp(ctx context.Context, gitOpsRepoName string, userId int32) (chartGitAttribute *commonBean.ChartGitAttribute, err error) { - //baseTemplateName replace whitespace +func (impl *GitOperationServiceImpl) CreateGitRepositoryForDevtronApp(ctx context.Context, gitOpsRepoName string, targetRevision string, userId int32) (chartGitAttribute *commonBean.ChartGitAttribute, err error) { 
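+ // targetRevision is carried through to the repo creation request and the returned ChartGitAttribute so that subsequent commits and pushes target the requested branch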
+ // baseTemplateName replace whitespace space := regexp.MustCompile(`\s+`) gitOpsRepoName = space.ReplaceAllString(gitOpsRepoName, "-") @@ -94,6 +94,7 @@ func (impl *GitOperationServiceImpl) CreateGitRepositoryForDevtronApp(ctx contex //getting username & emailId for commit author data gitRepoRequest := &apiBean.GitOpsConfigDto{ GitRepoName: gitOpsRepoName, + TargetRevision: targetRevision, Description: fmt.Sprintf("helm chart for " + gitOpsRepoName), BitBucketWorkspaceId: bitbucketMetadata.BitBucketWorkspaceId, BitBucketProjectKey: bitbucketMetadata.BitBucketProjectKey, @@ -103,30 +104,35 @@ func (impl *GitOperationServiceImpl) CreateGitRepositoryForDevtronApp(ctx contex impl.logger.Errorw("error in creating git project", "name", gitOpsRepoName, "err", err) return nil, err } - return &commonBean.ChartGitAttribute{RepoUrl: repoUrl, IsNewRepo: isNew, IsRepoEmpty: isEmpty}, nil + return &commonBean.ChartGitAttribute{ + RepoUrl: repoUrl, + TargetRevision: targetRevision, + IsNewRepo: isNew, + IsRepoEmpty: isEmpty, + }, nil } func getChartDirPathFromCloneDir(cloneDirPath string) (string, error) { return filepath.Rel(GIT_WORKING_DIR, cloneDirPath) } -func (impl *GitOperationServiceImpl) PushChartToGitRepo(ctx context.Context, gitOpsRepoName, referenceTemplate, version, tempReferenceTemplateDir, repoUrl string, userId int32) (err error) { +func (impl *GitOperationServiceImpl) PushChartToGitRepo(ctx context.Context, gitOpsRepoName, chartLocation, tempReferenceTemplateDir, repoUrl, targetRevision string, userId int32) (err error) { newCtx, span := otel.Tracer("orchestrator").Start(ctx, "GitOperationServiceImpl.PushChartToGitRepo") defer span.End() chartDir := fmt.Sprintf("%s-%s", gitOpsRepoName, impl.chartTemplateService.GetDir()) - clonedDir, err := impl.getClonedDir(newCtx, chartDir, repoUrl) + clonedDir, err := impl.GetClonedDir(newCtx, chartDir, repoUrl, targetRevision) defer impl.chartTemplateService.CleanDir(clonedDir) if err != nil { impl.logger.Errorw("error in cloning repo", "url", repoUrl, "err", err) return err } // TODO: verify if GitPull is required or not; remove if not at all required. 
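+ // note: when the clone directory already exists, GetClonedDir returns it without pulling, so this pull refreshes a possibly stale checkout of targetRevision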
- err = impl.GitPull(clonedDir, repoUrl) + err = impl.GitPull(clonedDir, repoUrl, targetRevision) if err != nil { impl.logger.Errorw("error in pulling git repo", "url", repoUrl, "err", err) return err } - dir := filepath.Join(clonedDir, referenceTemplate, version) + dir := filepath.Join(clonedDir, chartLocation) performFirstCommitPush := true //if chart already exists don't overrides it by reference template @@ -161,11 +167,11 @@ func (impl *GitOperationServiceImpl) PushChartToGitRepo(ctx context.Context, git // if push needed, then only push if performFirstCommitPush { userEmailId, userName := impl.gitOpsConfigReadService.GetUserEmailIdAndNameForGitOpsCommit(userId) - commit, err := impl.gitFactory.GitOpsHelper.CommitAndPushAllChanges(newCtx, clonedDir, "first commit", userName, userEmailId) + commit, err := impl.gitFactory.GitOpsHelper.CommitAndPushAllChanges(newCtx, clonedDir, targetRevision, "first commit", userName, userEmailId) if err != nil { impl.logger.Errorw("error in pushing git", "err", err) callback := func() error { - commit, err = impl.updateRepoAndPushAllChanges(newCtx, clonedDir, repoUrl, + commit, err = impl.updateRepoAndPushAllChanges(newCtx, clonedDir, repoUrl, targetRevision, tempReferenceTemplateDir, dir, userName, userEmailId, impl.gitFactory.GitOpsHelper) return err } @@ -185,10 +191,10 @@ func (impl *GitOperationServiceImpl) PushChartToGitRepo(ctx context.Context, git return nil } -func (impl *GitOperationServiceImpl) updateRepoAndPushAllChanges(ctx context.Context, clonedDir, repoUrl, +func (impl *GitOperationServiceImpl) updateRepoAndPushAllChanges(ctx context.Context, clonedDir, repoUrl, targetRevision, tempReferenceTemplateDir, dir, userName, userEmailId string, gitOpsHelper *GitOpsHelper) (commit string, err error) { impl.logger.Warn("re-trying, taking pull and then push again") - err = impl.GitPull(clonedDir, repoUrl) + err = impl.GitPull(clonedDir, repoUrl, targetRevision) if err != nil { return commit, err } @@ -197,7 +203,7 @@ func (impl *GitOperationServiceImpl) updateRepoAndPushAllChanges(ctx context.Con impl.logger.Errorw("error copying dir", "err", err) return commit, err } - commit, err = gitOpsHelper.CommitAndPushAllChanges(ctx, clonedDir, "first commit", userName, userEmailId) + commit, err = gitOpsHelper.CommitAndPushAllChanges(ctx, clonedDir, targetRevision, "first commit", userName, userEmailId) if err != nil { impl.logger.Errorw("error in pushing git", "err", err) return commit, retryFunc.NewRetryableError(err) @@ -205,7 +211,7 @@ func (impl *GitOperationServiceImpl) updateRepoAndPushAllChanges(ctx context.Con return commit, nil } -func (impl *GitOperationServiceImpl) CreateReadmeInGitRepo(ctx context.Context, gitOpsRepoName string, userId int32) error { +func (impl *GitOperationServiceImpl) CreateReadmeInGitRepo(ctx context.Context, gitOpsRepoName string, targetRevision string, userId int32) error { userEmailId, userName := impl.gitOpsConfigReadService.GetUserEmailIdAndNameForGitOpsCommit(userId) gitOpsConfig, err := impl.gitOpsConfigReadService.GetGitOpsConfigActive() if err != nil { @@ -217,6 +223,7 @@ func (impl *GitOperationServiceImpl) CreateReadmeInGitRepo(ctx context.Context, gitOpsConfig.UserEmailId = userEmailId gitOpsConfig.Username = userName gitOpsConfig.GitRepoName = gitOpsRepoName + gitOpsConfig.TargetRevision = targetRevision } _, err = impl.gitFactory.Client.CreateReadme(ctx, gitOpsConfig) if err != nil { @@ -226,8 +233,8 @@ func (impl *GitOperationServiceImpl) CreateReadmeInGitRepo(ctx context.Context, return nil } -func (impl 
*GitOperationServiceImpl) GitPull(clonedDir string, repoUrl string) error { - err := impl.gitFactory.GitOpsHelper.Pull(clonedDir) +func (impl *GitOperationServiceImpl) GitPull(clonedDir string, repoUrl string, targetRevision string) error { + err := impl.gitFactory.GitOpsHelper.Pull(clonedDir, targetRevision) if err != nil { impl.logger.Errorw("error in pulling git", "clonedDir", clonedDir, "err", err) impl.chartTemplateService.CleanDir(clonedDir) @@ -236,7 +243,7 @@ func (impl *GitOperationServiceImpl) GitPull(clonedDir string, repoUrl string) e impl.logger.Errorw("error in getting chart dir from cloned dir", "clonedDir", clonedDir, "err", err) return err } - _, err = impl.gitFactory.GitOpsHelper.Clone(repoUrl, chartDir) + _, err = impl.gitFactory.GitOpsHelper.Clone(repoUrl, chartDir, targetRevision) if err != nil { impl.logger.Errorw("error in cloning repo", "url", repoUrl, "err", err) return err @@ -318,75 +325,79 @@ func (impl *GitOperationServiceImpl) GetRepoUrlByRepoName(repoName string) (stri // PushChartToGitOpsRepoForHelmApp pushes built chart to GitOps repo (Specific implementation for Helm Apps) // TODO refactoring: Make a common method for both PushChartToGitRepo and PushChartToGitOpsRepoForHelmApp -func (impl *GitOperationServiceImpl) PushChartToGitOpsRepoForHelmApp(ctx context.Context, PushChartToGitRequest *bean.PushChartToGitRequestDTO, requirementsConfig *ChartConfig, valuesConfig *ChartConfig) (*commonBean.ChartGitAttribute, string, error) { - chartDir := fmt.Sprintf("%s-%s", PushChartToGitRequest.AppName, impl.chartTemplateService.GetDir()) +func (impl *GitOperationServiceImpl) PushChartToGitOpsRepoForHelmApp(ctx context.Context, pushChartToGitRequest *bean.PushChartToGitRequestDTO, requirementsConfig, valuesConfig *ChartConfig) (*commonBean.ChartGitAttribute, string, error) { + chartDir := fmt.Sprintf("%s-%s", pushChartToGitRequest.AppName, impl.chartTemplateService.GetDir()) clonedDir := impl.gitFactory.GitOpsHelper.GetCloneDirectory(chartDir) if _, err := os.Stat(clonedDir); os.IsNotExist(err) { - clonedDir, err = impl.gitFactory.GitOpsHelper.Clone(PushChartToGitRequest.RepoURL, chartDir) + clonedDir, err = impl.gitFactory.GitOpsHelper.Clone(pushChartToGitRequest.RepoURL, chartDir, pushChartToGitRequest.TargetRevision) if err != nil { - impl.logger.Errorw("error in cloning repo", "url", PushChartToGitRequest.RepoURL, "err", err) + impl.logger.Errorw("error in cloning repo", "url", pushChartToGitRequest.RepoURL, "err", err) return nil, "", err } } else { - err = impl.GitPull(clonedDir, PushChartToGitRequest.RepoURL) + err = impl.GitPull(clonedDir, pushChartToGitRequest.RepoURL, pushChartToGitRequest.TargetRevision) if err != nil { return nil, "", err } } - acdAppName := globalUtil.BuildDeployedAppName(PushChartToGitRequest.AppName, PushChartToGitRequest.EnvName) - dir := filepath.Join(clonedDir, acdAppName) + gitOpsChartLocation := fmt.Sprintf("%s-%s", pushChartToGitRequest.AppName, pushChartToGitRequest.EnvName) + dir := filepath.Join(clonedDir, gitOpsChartLocation) err := os.MkdirAll(dir, os.ModePerm) if err != nil { impl.logger.Errorw("error in making dir", "err", err) return nil, "", err } - err = dirCopy.Copy(PushChartToGitRequest.TempChartRefDir, dir) + err = dirCopy.Copy(pushChartToGitRequest.TempChartRefDir, dir) if err != nil { impl.logger.Errorw("error copying dir", "err", err) return nil, "", err } err = impl.addConfigFileToChart(requirementsConfig, dir, clonedDir) if err != nil { - impl.logger.Errorw("error in adding requirements.yaml to chart", "err", err, 
"appName", PushChartToGitRequest.AppName) + impl.logger.Errorw("error in adding requirements.yaml to chart", "err", err, "appName", pushChartToGitRequest.AppName) return nil, "", err } err = impl.addConfigFileToChart(valuesConfig, dir, clonedDir) if err != nil { - impl.logger.Errorw("error in adding values.yaml to chart", "err", err, "appName", PushChartToGitRequest.AppName) + impl.logger.Errorw("error in adding values.yaml to chart", "err", err, "appName", pushChartToGitRequest.AppName) return nil, "", err } - userEmailId, userName := impl.gitOpsConfigReadService.GetUserEmailIdAndNameForGitOpsCommit(PushChartToGitRequest.UserId) - commit, err := impl.gitFactory.GitOpsHelper.CommitAndPushAllChanges(ctx, clonedDir, "first commit", userName, userEmailId) + userEmailId, userName := impl.gitOpsConfigReadService.GetUserEmailIdAndNameForGitOpsCommit(pushChartToGitRequest.UserId) + commit, err := impl.gitFactory.GitOpsHelper.CommitAndPushAllChanges(ctx, clonedDir, pushChartToGitRequest.TargetRevision, "first commit", userName, userEmailId) if err != nil { impl.logger.Errorw("error in pushing git", "err", err) impl.logger.Warn("re-trying, taking pull and then push again") - err = impl.GitPull(clonedDir, PushChartToGitRequest.RepoURL) + err = impl.GitPull(clonedDir, pushChartToGitRequest.RepoURL, pushChartToGitRequest.TargetRevision) if err != nil { - impl.logger.Errorw("error in git pull", "err", err, "appName", acdAppName) + impl.logger.Errorw("error in git pull", "err", err, "appName", gitOpsChartLocation) return nil, "", err } - err = dirCopy.Copy(PushChartToGitRequest.TempChartRefDir, dir) + err = dirCopy.Copy(pushChartToGitRequest.TempChartRefDir, dir) if err != nil { impl.logger.Errorw("error copying dir", "err", err) return nil, "", err } - commit, err = impl.gitFactory.GitOpsHelper.CommitAndPushAllChanges(ctx, clonedDir, "first commit", userName, userEmailId) + commit, err = impl.gitFactory.GitOpsHelper.CommitAndPushAllChanges(ctx, clonedDir, pushChartToGitRequest.TargetRevision, "first commit", userName, userEmailId) if err != nil { impl.logger.Errorw("error in pushing git", "err", err) return nil, "", err } } - impl.logger.Debugw("template committed", "url", PushChartToGitRequest.RepoURL, "commit", commit) + impl.logger.Debugw("template committed", "url", pushChartToGitRequest.RepoURL, "commit", commit) defer impl.chartTemplateService.CleanDir(clonedDir) - return &commonBean.ChartGitAttribute{RepoUrl: PushChartToGitRequest.RepoURL, ChartLocation: acdAppName}, commit, err + return &commonBean.ChartGitAttribute{ + RepoUrl: pushChartToGitRequest.RepoURL, + ChartLocation: gitOpsChartLocation, + TargetRevision: pushChartToGitRequest.TargetRevision, + }, commit, err } -func (impl *GitOperationServiceImpl) getClonedDir(ctx context.Context, chartDir, repoUrl string) (string, error) { - _, span := otel.Tracer("orchestrator").Start(ctx, "GitOperationServiceImpl.getClonedDir") +func (impl *GitOperationServiceImpl) GetClonedDir(ctx context.Context, chartDir, repoUrl, targetRevision string) (string, error) { + _, span := otel.Tracer("orchestrator").Start(ctx, "GitOperationServiceImpl.GetClonedDir") defer span.End() clonedDir := impl.gitFactory.GitOpsHelper.GetCloneDirectory(chartDir) if _, err := os.Stat(clonedDir); os.IsNotExist(err) { - return impl.CloneInDir(repoUrl, chartDir) + return impl.cloneInDir(repoUrl, chartDir, targetRevision) } else if err != nil { impl.logger.Errorw("error in cloning repo", "url", repoUrl, "err", err) return "", err @@ -394,8 +405,8 @@ func (impl 
*GitOperationServiceImpl) getClonedDir(ctx context.Context, chartDir, return clonedDir, nil } -func (impl *GitOperationServiceImpl) CloneInDir(repoUrl, chartDir string) (string, error) { - clonedDir, err := impl.gitFactory.GitOpsHelper.Clone(repoUrl, chartDir) +func (impl *GitOperationServiceImpl) cloneInDir(repoUrl, chartDir, targetRevision string) (string, error) { + clonedDir, err := impl.gitFactory.GitOpsHelper.Clone(repoUrl, chartDir, targetRevision) if err != nil { impl.logger.Errorw("error in cloning repo", "url", repoUrl, "err", err) return "", err diff --git a/pkg/deployment/gitOps/git/GitOpsHelper.go b/pkg/deployment/gitOps/git/GitOpsHelper.go index bc7add2ff9..9c5d02c7d4 100644 --- a/pkg/deployment/gitOps/git/GitOpsHelper.go +++ b/pkg/deployment/gitOps/git/GitOpsHelper.go @@ -65,7 +65,7 @@ func (impl *GitOpsHelper) GetCloneDirectory(targetDir string) (clonedDir string) return clonedDir } -func (impl *GitOpsHelper) Clone(url, targetDir string) (clonedDir string, err error) { +func (impl *GitOpsHelper) Clone(url, targetDir, targetRevision string) (clonedDir string, err error) { start := time.Now() defer func() { util.TriggerGitOpsMetrics("Clone", "GitService", start, err) @@ -82,7 +82,7 @@ func (impl *GitOpsHelper) Clone(url, targetDir string) (clonedDir string, err er _, errMsg, err := impl.gitCommandManager.Fetch(ctx, clonedDir) if err == nil && errMsg == "" { impl.logger.Debugw("git fetch completed, pulling master branch data from remote origin") - _, errMsg, err := impl.pullFromBranch(ctx, clonedDir) + _, errMsg, err := impl.pullFromBranch(ctx, clonedDir, targetRevision) if err != nil { impl.logger.Errorw("error on git pull", "err", err) return errMsg, err @@ -95,19 +95,19 @@ func (impl *GitOpsHelper) Clone(url, targetDir string) (clonedDir string, err er return clonedDir, nil } -func (impl *GitOpsHelper) Pull(repoRoot string) (err error) { +func (impl *GitOpsHelper) Pull(repoRoot, targetRevision string) (err error) { start := time.Now() defer func() { util.TriggerGitOpsMetrics("Pull", "GitService", start, err) }() ctx := git.BuildGitContext(context.Background()).WithCredentials(impl.Auth). WithTLSData(impl.tlsConfig.CaData, impl.tlsConfig.TLSKeyData, impl.tlsConfig.TLSCertData, impl.isTlsEnabled) - return impl.gitCommandManager.Pull(ctx, repoRoot) + return impl.gitCommandManager.Pull(ctx, targetRevision, repoRoot) } const PushErrorMessage = "failed to push some refs" -func (impl *GitOpsHelper) CommitAndPushAllChanges(ctx context.Context, repoRoot, commitMsg, name, emailId string) (commitHash string, err error) { +func (impl *GitOpsHelper) CommitAndPushAllChanges(ctx context.Context, repoRoot, targetRevision, commitMsg, name, emailId string) (commitHash string, err error) { start := time.Now() newCtx, span := otel.Tracer("orchestrator").Start(ctx, "GitOpsHelper.CommitAndPushAllChanges") defer func() { @@ -116,15 +116,15 @@ func (impl *GitOpsHelper) CommitAndPushAllChanges(ctx context.Context, repoRoot, }() gitCtx := git.BuildGitContext(newCtx).WithCredentials(impl.Auth). 
WithTLSData(impl.tlsConfig.CaData, impl.tlsConfig.TLSKeyData, impl.tlsConfig.TLSCertData, impl.isTlsEnabled) - commitHash, err = impl.gitCommandManager.CommitAndPush(gitCtx, repoRoot, commitMsg, name, emailId) + commitHash, err = impl.gitCommandManager.CommitAndPush(gitCtx, repoRoot, targetRevision, commitMsg, name, emailId) if err != nil && strings.Contains(err.Error(), PushErrorMessage) { return commitHash, fmt.Errorf("%s %v", "push failed due to conflicts", err) } return commitHash, nil } -func (impl *GitOpsHelper) pullFromBranch(ctx git.GitContext, rootDir string) (string, string, error) { - branch, err := impl.getBranch(ctx, rootDir) +func (impl *GitOpsHelper) pullFromBranch(ctx git.GitContext, rootDir, targetRevision string) (string, string, error) { + branch, err := impl.getBranch(ctx, rootDir, targetRevision) if err != nil || branch == "" { impl.logger.Warnw("no branch found in git repo", "rootDir", rootDir) return "", "", err } @@ -157,7 +157,7 @@ func (impl *GitOpsHelper) init(ctx git.GitContext, rootDir string, remoteUrl str return impl.gitCommandManager.AddRepo(ctx, rootDir, remoteUrl, isBare) } -func (impl *GitOpsHelper) getBranch(ctx git.GitContext, rootDir string) (string, error) { +func (impl *GitOpsHelper) getBranch(ctx git.GitContext, rootDir, targetRevision string) (string, error) { response, errMsg, err := impl.gitCommandManager.ListBranch(ctx, rootDir) if err != nil { impl.logger.Errorw("error on git pull", "response", response, "errMsg", errMsg, "err", err) @@ -165,17 +165,27 @@ } branches := strings.Split(response, "\n") impl.logger.Infow("total branch available in git repo", "branch length", len(branches)) - branch := "" + branch := impl.getDefaultBranch(branches, targetRevision) + if strings.HasPrefix(branch, "origin/") { + branch = strings.TrimPrefix(branch, "origin/") + } + return branch, nil +} + +func (impl *GitOpsHelper) getDefaultBranch(branches []string, targetRevision string) (branch string) { + // the preferred branch is bean.TargetRevisionMaster for _, item := range branches { - if strings.TrimSpace(item) == git.ORIGIN_MASTER { - branch = git.Branch_Master + if len(targetRevision) != 0 && item == targetRevision { + return targetRevision + } else if util.IsDefaultTargetRevision(item) { + return util.GetDefaultTargetRevision() + } } - //if git repo has some branch take pull of the first branch, but eventually proxy chart will push into master branch - if len(branch) == 0 && branches != nil { - branch = strings.ReplaceAll(branches[0], "origin/", "") + // if the git repo has some branch, take a pull of the first branch; eventually the proxy chart will push into the master branch + if len(branches) != 0 { + return strings.TrimSpace(branches[0]) } - return branch, nil + return util.GetDefaultTargetRevision() } /* @@ -190,7 +200,7 @@ Case AZURE_DEVOPS_PROVIDER: - The clone URL format https://<user-name>@dev.azure.com/<organisation-name>/<project-name>/_git/<repository-name> - Here the <user-name> can differ from user to user.
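e.g. (illustrative) https://johndoe@dev.azure.com/my-org/my-project/_git/my-repo is sanitised to https://dev.azure.com/my-org/my-project/_git/my-repo.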
SanitiseCustomGitRepoURL will return the repo url in format: https://dev.azure.com/<organisation-name>/<project-name>/_git/<repository-name> */ -func SanitiseCustomGitRepoURL(activeGitOpsConfig apiGitOpsBean.GitOpsConfigDto, gitRepoURL string) (sanitisedGitRepoURL string) { +func SanitiseCustomGitRepoURL(activeGitOpsConfig *apiGitOpsBean.GitOpsConfigDto, gitRepoURL string) (sanitisedGitRepoURL string) { sanitisedGitRepoURL = gitRepoURL if activeGitOpsConfig.Provider == BITBUCKET_PROVIDER && strings.Contains(gitRepoURL, fmt.Sprintf("://%s@%s", activeGitOpsConfig.Username, "bitbucket.org/")) { sanitisedGitRepoURL = strings.ReplaceAll(gitRepoURL, fmt.Sprintf("://%s@%s", activeGitOpsConfig.Username, "bitbucket.org/"), "://bitbucket.org/") diff --git a/pkg/deployment/gitOps/git/GitServiceAzure.go b/pkg/deployment/gitOps/git/GitServiceAzure.go index 6b9cab2caf..ff5a6d5dbd 100644 --- a/pkg/deployment/gitOps/git/GitServiceAzure.go +++ b/pkg/deployment/gitOps/git/GitServiceAzure.go @@ -23,6 +23,7 @@ import ( "fmt" bean2 "github.com/devtron-labs/devtron/api/bean/gitOps" globalUtil "github.com/devtron-labs/devtron/util" + "github.com/devtron-labs/devtron/util/gitUtil" "github.com/devtron-labs/devtron/util/retryFunc" "github.com/microsoft/azure-devops-go-api/azuredevops" "github.com/microsoft/azure-devops-go-api/azuredevops/git" @@ -162,8 +163,7 @@ func (impl GitAzureClient) CreateRepository(ctx context.Context, config *bean2.G } isEmpty = false //As we have created readme, repo is no longer empty detailedErrorGitOpsConfigActions.SuccessfulStages = append(detailedErrorGitOpsConfigActions.SuccessfulStages, CreateReadmeStage) - - validated, err = impl.ensureProjectAvailabilityOnSsh(impl.project, *operationReference.WebUrl) + validated, err = impl.ensureProjectAvailabilityOnSsh(impl.project, *operationReference.WebUrl, config.TargetRevision) if err != nil { impl.logger.Errorw("error in ensuring project availability azure", "project", config.GitRepoName, "err", err) detailedErrorGitOpsConfigActions.StageErrorMap[CloneSshStage] = err @@ -192,6 +192,7 @@ func (impl GitAzureClient) CreateReadme(ctx context.Context, config *bean2.GitOp FileContent: "@devtron", ReleaseMessage: "readme", ChartRepoName: config.GitRepoName, + TargetRevision: config.TargetRevision, UserName: config.Username, UserEmailId: config.UserEmailId, } @@ -203,8 +204,11 @@ } func (impl GitAzureClient) CommitValues(ctx context.Context, config *ChartConfig, gitOpsConfig *bean2.GitOpsConfigDto) (commitHash string, commitTime time.Time, err error) { - branch := "master" - branchfull := "refs/heads/master" + branch := config.TargetRevision + if len(branch) == 0 { + branch = globalUtil.GetDefaultTargetRevision() + } + branchRefHead := gitUtil.GetRefBranchHead(branch) path := filepath.Join(config.ChartLocation, config.FileName) newFile := true oldObjId := "0000000000000000000000000000000000000000" //default commit hash @@ -241,7 +245,7 @@ var refUpdates []git.GitRefUpdate refUpdates = append(refUpdates, git.GitRefUpdate{ - Name: &branchfull, + Name: &branchRefHead, OldObjectId: &oldObjId, }) var changeType git.VersionControlChangeType @@ -357,9 +361,9 @@ func (impl GitAzureClient) ensureProjectAvailabilityOnHttp(repoName string) (boo return false, nil } -func (impl GitAzureClient) ensureProjectAvailabilityOnSsh(projectName string,
repoUrl string) (bool, error) { +func (impl GitAzureClient) ensureProjectAvailabilityOnSsh(projectName string, repoUrl string, targetRevision string) (bool, error) { for count := 0; count < 8; count++ { - _, err := impl.gitOpsHelper.Clone(repoUrl, fmt.Sprintf("/ensure-clone/%s", projectName)) + _, err := impl.gitOpsHelper.Clone(repoUrl, fmt.Sprintf("/ensure-clone/%s", projectName), targetRevision) if err == nil { impl.logger.Infow("ensureProjectAvailability clone passed azure", "try count", count, "repoUrl", repoUrl) return true, nil diff --git a/pkg/deployment/gitOps/git/GitServiceBitbucket.go b/pkg/deployment/gitOps/git/GitServiceBitbucket.go index 65825ef2d1..243f399e11 100644 --- a/pkg/deployment/gitOps/git/GitServiceBitbucket.go +++ b/pkg/deployment/gitOps/git/GitServiceBitbucket.go @@ -169,7 +169,7 @@ } detailedErrorGitOpsConfigActions.SuccessfulStages = append(detailedErrorGitOpsConfigActions.SuccessfulStages, CreateReadmeStage) - validated, err = impl.ensureProjectAvailabilityOnSsh(repoOptions) + validated, err = impl.ensureProjectAvailabilityOnSsh(repoOptions, config.TargetRevision) if err != nil { impl.logger.Errorw("error in ensuring project availability bitbucket", "project", config.GitRepoName, "err", err) detailedErrorGitOpsConfigActions.StageErrorMap[CloneSshStage] = err @@ -237,6 +237,7 @@ func (impl GitBitbucketClient) CreateReadme(ctx context.Context, config *bean2.G FileContent: "@devtron", ReleaseMessage: "pushing readme", ChartRepoName: config.GitRepoName, + TargetRevision: config.TargetRevision, UserName: config.Username, UserEmailId: config.UserEmailId, } @@ -248,10 +249,10 @@ return hash, err } -func (impl GitBitbucketClient) ensureProjectAvailabilityOnSsh(repoOptions *bitbucket.RepositoryOptions) (bool, error) { +func (impl GitBitbucketClient) ensureProjectAvailabilityOnSsh(repoOptions *bitbucket.RepositoryOptions, targetRevision string) (bool, error) { repoUrl := fmt.Sprintf(BITBUCKET_CLONE_BASE_URL+"%s/%s.git", repoOptions.Owner, repoOptions.RepoSlug) for count := 0; count < 5; count++ { - _, err := impl.gitOpsHelper.Clone(repoUrl, fmt.Sprintf("/ensure-clone/%s", repoOptions.RepoSlug)) + _, err := impl.gitOpsHelper.Clone(repoUrl, fmt.Sprintf("/ensure-clone/%s", repoOptions.RepoSlug), targetRevision) if err == nil { impl.logger.Infow("ensureProjectAvailability clone passed Bitbucket", "try count", count, "repoUrl", repoUrl) return true, nil @@ -298,6 +299,10 @@ } fileName := filepath.Join(config.ChartLocation, config.FileName) + branch := config.TargetRevision + if len(branch) == 0 { + branch = util.GetDefaultTargetRevision() + } //bitbucket needs author as - "Name <email-Id>" authorBitbucket := fmt.Sprintf("%s <%s>", config.UserName, config.UserEmailId) repoWriteOptions := &bitbucket.RepositoryBlobWriteOptions{ @@ -306,7 +311,7 @@ Owner: gitOpsConfig.BitBucketWorkspaceId, FilePath: bitbucketCommitFilePath, FileName: fileName, Message: config.ReleaseMessage, - Branch: "master", + Branch: branch, Author: authorBitbucket, } repoWriteOptions.WithContext(ctx) @@ -321,7 +326,7 @@ commitOptions := &bitbucket.CommitsOptions{ RepoSlug: config.ChartRepoName, Owner: gitOpsConfig.BitBucketWorkspaceId, - Branchortag: "master", +
Branchortag: branch, } commits, err := impl.client.Repositories.Commits.GetCommits(commitOptions) if err != nil { diff --git a/pkg/deployment/gitOps/git/GitServiceGithub.go b/pkg/deployment/gitOps/git/GitServiceGithub.go index 893075fb51..2ddb364e07 100644 --- a/pkg/deployment/gitOps/git/GitServiceGithub.go +++ b/pkg/deployment/gitOps/git/GitServiceGithub.go @@ -169,7 +169,7 @@ func (impl GitHubClient) CreateRepository(ctx context.Context, config *bean2.Git isEmpty = false //As we have created readme, repo is no longer empty detailedErrorGitOpsConfigActions.SuccessfulStages = append(detailedErrorGitOpsConfigActions.SuccessfulStages, CreateReadmeStage) - validated, err = impl.ensureProjectAvailabilityOnSsh(config.GitRepoName, *r.CloneURL) + validated, err = impl.ensureProjectAvailabilityOnSsh(config.GitRepoName, *r.CloneURL, config.TargetRevision) if err != nil { impl.logger.Errorw("error in ensuring project availability github", "project", config.GitRepoName, "err", err) detailedErrorGitOpsConfigActions.StageErrorMap[CloneSshStage] = err @@ -198,6 +198,7 @@ FileContent: "@devtron", ReleaseMessage: "readme", ChartRepoName: config.GitRepoName, + TargetRevision: config.TargetRevision, UserName: config.Username, UserEmailId: config.UserEmailId, } @@ -215,7 +216,10 @@ globalUtil.TriggerGitOpsMetrics("CommitValues", "GitHubClient", start, err) }() - branch := "master" + branch := config.TargetRevision + if len(branch) == 0 { + branch = globalUtil.GetDefaultTargetRevision() + } path := filepath.Join(config.ChartLocation, config.FileName) newFile := false fc, _, _, err := impl.client.Repositories.GetContents(ctx, impl.org, config.ChartRepoName, path, &github.RepositoryContentGetOptions{Ref: branch}) @@ -313,7 +317,7 @@ return false, nil } -func (impl GitHubClient) ensureProjectAvailabilityOnSsh(projectName string, repoUrl string) (bool, error) { +func (impl GitHubClient) ensureProjectAvailabilityOnSsh(projectName string, repoUrl, targetRevision string) (bool, error) { var err error start := time.Now() defer func() { @@ -323,12 +327,11 @@ count := 0 for count < 3 { count = count + 1 - _, err := impl.gitOpsHelper.Clone(repoUrl, fmt.Sprintf("/ensure-clone/%s", projectName)) + _, err := impl.gitOpsHelper.Clone(repoUrl, fmt.Sprintf("/ensure-clone/%s", projectName), targetRevision) if err == nil { impl.logger.Infow("github ensureProjectAvailability clone passed", "try count", count, "repoUrl", repoUrl) return true, nil - } - if err != nil { + } else { impl.logger.Errorw("github ensureProjectAvailability clone failed", "try count", count, "err", err) } time.Sleep(10 * time.Second) diff --git a/pkg/deployment/gitOps/git/GitServiceGitlab.go b/pkg/deployment/gitOps/git/GitServiceGitlab.go index cf5374f6c2..bcabae1d5b 100644 --- a/pkg/deployment/gitOps/git/GitServiceGitlab.go +++ b/pkg/deployment/gitOps/git/GitServiceGitlab.go @@ -184,7 +184,7 @@ func (impl GitLabClient) CreateRepository(ctx context.Context, config *bean2.Git } isEmpty = false //As we have created readme, repo is no longer empty detailedErrorGitOpsConfigActions.SuccessfulStages = append(detailedErrorGitOpsConfigActions.SuccessfulStages, CreateReadmeStage) - validated, err =
impl.ensureProjectAvailabilityOnSsh(config.GitRepoName, repoUrl) + validated, err = impl.ensureProjectAvailabilityOnSsh(config.GitRepoName, repoUrl, config.TargetRevision) if err != nil { impl.logger.Errorw("error in ensuring project availability ", "gitlab project", config.GitRepoName, "err", err) detailedErrorGitOpsConfigActions.StageErrorMap[CloneSshStage] = err @@ -264,7 +264,7 @@ func (impl GitLabClient) ensureProjectAvailability(projectName string) (bool, er return false, nil } -func (impl GitLabClient) ensureProjectAvailabilityOnSsh(projectName string, repoUrl string) (bool, error) { +func (impl GitLabClient) ensureProjectAvailabilityOnSsh(projectName string, repoUrl, targetRevision string) (bool, error) { var err error start := time.Now() defer func() { @@ -274,7 +274,7 @@ func (impl GitLabClient) ensureProjectAvailabilityOnSsh(projectName string, repo count := 0 for count < 3 { count = count + 1 - _, err := impl.gitOpsHelper.Clone(repoUrl, fmt.Sprintf("/ensure-clone/%s", projectName)) + _, err := impl.gitOpsHelper.Clone(repoUrl, fmt.Sprintf("/ensure-clone/%s", projectName), targetRevision) if err == nil { impl.logger.Infow("gitlab ensureProjectAvailability clone passed", "try count", count, "repoUrl", repoUrl) return true, nil @@ -319,13 +319,13 @@ func (impl GitLabClient) CreateReadme(ctx context.Context, config *bean2.GitOpsC fileAction := gitlab.FileCreate filePath := "README.md" fileContent := "devtron licence" - exists, _ := impl.checkIfFileExists(config.GitRepoName, "master", filePath) + exists, _ := impl.checkIfFileExists(config.GitRepoName, config.TargetRevision, filePath) if exists { fileAction = gitlab.FileUpdate } actions := &gitlab.CreateCommitOptions{ - Branch: gitlab.String("master"), - CommitMessage: gitlab.String("test commit"), + Branch: gitlab.Ptr(config.TargetRevision), + CommitMessage: gitlab.Ptr("test commit"), Actions: []*gitlab.CommitActionOptions{{Action: &fileAction, FilePath: &filePath, Content: &fileContent}}, AuthorEmail: &config.UserEmailId, AuthorName: &config.Username, @@ -351,7 +351,10 @@ func (impl GitLabClient) CommitValues(ctx context.Context, config *ChartConfig, util.TriggerGitOpsMetrics("CommitValues", "GitLabClient", start, err) }() - branch := "master" + branch := config.TargetRevision + if len(branch) == 0 { + branch = util.GetDefaultTargetRevision() + } path := filepath.Join(config.ChartLocation, config.FileName) exists, err := impl.checkIfFileExists(config.ChartRepoName, branch, path) var fileAction gitlab.FileActionValue diff --git a/pkg/deployment/gitOps/git/bean/bean.go b/pkg/deployment/gitOps/git/bean/bean.go index 6eb0bc3cfd..2eacaeb8d1 100644 --- a/pkg/deployment/gitOps/git/bean/bean.go +++ b/pkg/deployment/gitOps/git/bean/bean.go @@ -45,6 +45,7 @@ type PushChartToGitRequestDTO struct { EnvName string ChartAppStoreName string RepoURL string + TargetRevision string TempChartRefDir string UserId int32 } diff --git a/pkg/deployment/gitOps/git/commandManager/GitCliManager.go b/pkg/deployment/gitOps/git/commandManager/GitCliManager.go index b5b3a3a146..b7ca057492 100644 --- a/pkg/deployment/gitOps/git/commandManager/GitCliManager.go +++ b/pkg/deployment/gitOps/git/commandManager/GitCliManager.go @@ -37,7 +37,7 @@ func (impl *GitCliManagerImpl) AddRepo(ctx GitContext, rootDir string, remoteUrl return impl.gitCreateRemote(ctx, rootDir, remoteUrl) } -func (impl *GitCliManagerImpl) CommitAndPush(ctx GitContext, repoRoot, commitMsg, name, emailId string) (commitHash string, err error) { +func (impl *GitCliManagerImpl) CommitAndPush(ctx 
GitContext, repoRoot, targetRevision, commitMsg, name, emailId string) (commitHash string, err error) { start := time.Now() defer func() { util.TriggerGitOpsMetrics("CommitAndPushAllChanges", "GitService", start, err) @@ -61,12 +61,12 @@ func (impl *GitCliManagerImpl) CommitAndPush(ctx GitContext, repoRoot, commitMsg } impl.logger.Debugw("git hash", "repo", repoRoot, "hash", commit) - _, _, err = impl.push(ctx, repoRoot) + _, _, err = impl.push(ctx, repoRoot, targetRevision) return commit, err } -func (impl *GitCliManagerImpl) Pull(ctx GitContext, repoRoot string) (err error) { +func (impl *GitCliManagerImpl) Pull(ctx GitContext, targetRevision string, repoRoot string) (err error) { start := time.Now() defer func() { util.TriggerGitOpsMetrics("Pull", "GitService", start, err) @@ -76,7 +76,7 @@ func (impl *GitCliManagerImpl) Pull(ctx GitContext, repoRoot string) (err error) if err != nil { return err } - response, errMsg, err := impl.PullCli(ctx, repoRoot, "origin/master") + response, errMsg, err := impl.PullCli(ctx, repoRoot, targetRevision) if err != nil { impl.logger.Errorw("error in git pull from cli", "errMsg", errMsg, "err", err) } @@ -157,9 +157,9 @@ func (impl *GitCliManagerImpl) add(ctx GitContext, rootDir string) (response, er return output, errMsg, err } -func (impl *GitCliManagerImpl) push(ctx GitContext, rootDir string) (response, errMsg string, err error) { +func (impl *GitCliManagerImpl) push(ctx GitContext, rootDir, targetRevision string) (response, errMsg string, err error) { impl.logger.Debugw("git push", "location", rootDir) - cmd, cancel := impl.createCmdWithContext(ctx, "git", "-C", rootDir, "push", "origin", "master") + cmd, cancel := impl.createCmdWithContext(ctx, "git", "-C", rootDir, "push", "origin", targetRevision) defer cancel() tlsPathInfo, err := git_manager.CreateFilesForTlsData(git_manager.BuildTlsData(ctx.TLSKey, ctx.TLSCertificate, ctx.CACert, ctx.TLSVerificationEnabled), TLS_FOLDER) if err != nil { diff --git a/pkg/deployment/gitOps/git/commandManager/GitCommandManager.go b/pkg/deployment/gitOps/git/commandManager/GitCommandManager.go index 44bf551b1d..d62b315e4b 100644 --- a/pkg/deployment/gitOps/git/commandManager/GitCommandManager.go +++ b/pkg/deployment/gitOps/git/commandManager/GitCommandManager.go @@ -24,8 +24,8 @@ import ( type GitCommandManager interface { GitCommandManagerBase AddRepo(ctx GitContext, rootDir string, remoteUrl string, isBare bool) error - CommitAndPush(ctx GitContext, repoRoot, commitMsg, name, emailId string) (string, error) - Pull(ctx GitContext, repoRoot string) (err error) + CommitAndPush(ctx GitContext, repoRoot, targetRevision, commitMsg, name, emailId string) (string, error) + Pull(ctx GitContext, targetRevision string, repoRoot string) (err error) } func NewGitCommandManager(logger *zap.SugaredLogger) GitCommandManager { @@ -56,6 +56,3 @@ func ParseConfiguration() (*configuration, error) { } const GIT_ASK_PASS = "/git-ask-pass.sh" - -const Branch_Master = "master" -const ORIGIN_MASTER = "origin/master" diff --git a/pkg/deployment/gitOps/git/commandManager/GoGitSdkManager.go b/pkg/deployment/gitOps/git/commandManager/GoGitSdkManager.go index 3d70a73c31..139851354b 100644 --- a/pkg/deployment/gitOps/git/commandManager/GoGitSdkManager.go +++ b/pkg/deployment/gitOps/git/commandManager/GoGitSdkManager.go @@ -41,7 +41,7 @@ func (impl *GoGitSDKManagerImpl) AddRepo(ctx GitContext, rootDir string, remoteU return err } -func (impl GoGitSDKManagerImpl) Pull(ctx GitContext, repoRoot string) (err error) { +func (impl 
*GoGitSDKManagerImpl) Pull(ctx GitContext, targetRevision string, repoRoot string) (err error) { _, workTree, err := impl.getRepoAndWorktree(repoRoot) if err != nil { @@ -66,7 +66,7 @@ func (impl GoGitSDKManagerImpl) Pull(ctx GitContext, repoRoot string) (err error return err } -func (impl GoGitSDKManagerImpl) getRepoAndWorktree(repoRoot string) (*git.Repository, *git.Worktree, error) { +func (impl *GoGitSDKManagerImpl) getRepoAndWorktree(repoRoot string) (*git.Repository, *git.Worktree, error) { var err error start := time.Now() defer func() { @@ -80,7 +80,7 @@ func (impl GoGitSDKManagerImpl) getRepoAndWorktree(repoRoot string) (*git.Reposi return r, w, err } -func (impl GoGitSDKManagerImpl) CommitAndPush(ctx GitContext, repoRoot, commitMsg, name, emailId string) (string, error) { +func (impl *GoGitSDKManagerImpl) CommitAndPush(ctx GitContext, repoRoot, targetRevision, commitMsg, name, emailId string) (string, error) { repo, workTree, err := impl.getRepoAndWorktree(repoRoot) if err != nil { return "", err @@ -118,6 +118,7 @@ func (impl GoGitSDKManagerImpl) CommitAndPush(ctx GitContext, repoRoot, commitMs err = repo.PushContext(ctx, pushOptions) return commit.String(), err } + func (auth *BasicAuth) ToBasicAuth() *http.BasicAuth { return &http.BasicAuth{ Username: auth.Username, diff --git a/pkg/deployment/gitOps/git/models.go b/pkg/deployment/gitOps/git/models.go index 77c8229203..19b0b2ac88 100644 --- a/pkg/deployment/gitOps/git/models.go +++ b/pkg/deployment/gitOps/git/models.go @@ -35,6 +35,7 @@ type ChartConfig struct { FileContent string ReleaseMessage string ChartRepoName string + TargetRevision string UserName string UserEmailId string bitBucketBaseDir string // base directory is required for bitbucket to load the diff --git a/pkg/deployment/gitOps/validation/bean/bean.go b/pkg/deployment/gitOps/validation/bean/bean.go index c9c759cf95..af8baa2e22 100644 --- a/pkg/deployment/gitOps/validation/bean/bean.go +++ b/pkg/deployment/gitOps/validation/bean/bean.go @@ -18,8 +18,15 @@ package bean type ExtraValidationStageType int -type ValidateCustomGitRepoURLRequest struct { +type ValidateGitOpsRepoUrlRequest struct { + RequestedGitUrl string + DesiredGitUrl string + UseActiveGitOps bool +} + +type ValidateGitOpsRepoRequest struct { GitRepoURL string + TargetRevision string AppName string UserId int32 GitOpsProvider string diff --git a/pkg/deployment/gitOps/validation/gitOpsValidationService.go b/pkg/deployment/gitOps/validation/gitOpsValidationService.go index ea419c0a02..3f3bd92ae6 100644 --- a/pkg/deployment/gitOps/validation/gitOpsValidationService.go +++ b/pkg/deployment/gitOps/validation/gitOpsValidationService.go @@ -20,6 +20,7 @@ import ( "context" "fmt" apiBean "github.com/devtron-labs/devtron/api/bean/gitOps" + "github.com/devtron-labs/devtron/internal/constants" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/FullMode" chartService "github.com/devtron-labs/devtron/pkg/chart" @@ -27,10 +28,11 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config/bean" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/git" gitOpsBean "github.com/devtron-labs/devtron/pkg/deployment/gitOps/validation/bean" - util2 "github.com/devtron-labs/devtron/util" + globalUtil "github.com/devtron-labs/devtron/util" 
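
Note: with the Branch_Master and ORIGIN_MASTER constants removed, every command-manager operation now takes an explicit revision and falls back to the configured default when none is supplied. A minimal sketch of the fallback the call sites repeat (resolveTargetRevision is a hypothetical helper; GetDefaultTargetRevision is the util function this diff already calls):

    package commandManager

    import globalUtil "github.com/devtron-labs/devtron/util"

    // resolveTargetRevision mirrors the inline fallback used before each
    // push/pull: prefer the requested revision, else the configured default.
    func resolveTargetRevision(requested string) string {
        if len(requested) == 0 {
            // GetDefaultTargetRevision replaces the removed hard-coded "master".
            return globalUtil.GetDefaultTargetRevision()
        }
        return requested
    }

With this in place, the CLI push runs git -C <rootDir> push origin <targetRevision> rather than always pushing master.
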
"github.com/microsoft/azure-devops-go-api/azuredevops" "github.com/xanzy/go-gitlab" "go.uber.org/zap" + "net/http" "os" "strings" "time" @@ -40,11 +42,15 @@ type GitOpsValidationService interface { // GitOpsValidateDryRun performs the following validations: // "Get Repo URL", "Create Repo (if it doesn't exist)", "Create Readme", "Clone Http", "Clone Ssh", "Commit On Rest", "Push", "Delete Repo" // And returns: gitOps.DetailedErrorGitOpsConfigResponse - GitOpsValidateDryRun(config *apiBean.GitOpsConfigDto) apiBean.DetailedErrorGitOpsConfigResponse - // ValidateCustomGitRepoURL performs the following validations: + GitOpsValidateDryRun(isArgoModuleInstalled bool, config *apiBean.GitOpsConfigDto) apiBean.DetailedErrorGitOpsConfigResponse + // ValidateGitOpsRepoUrl performs the following validations: + // "Organisational URL Validation", "Unique GitOps Repo" + // And returns: SanitisedRepoUrl and error + ValidateGitOpsRepoUrl(request *gitOpsBean.ValidateGitOpsRepoUrlRequest) (string, error) + // ValidateCustomGitOpsConfig performs the following validations: // "Get Repo URL", "Create Repo (if it doesn't exist)", "Organisational URL Validation", "Unique GitOps Repo" // And returns: RepoUrl and isNew Repository url and error - ValidateCustomGitRepoURL(request gitOpsBean.ValidateCustomGitRepoURLRequest) (string, bool, error) + ValidateCustomGitOpsConfig(request gitOpsBean.ValidateGitOpsRepoRequest) (string, bool, error) } type GitOpsValidationServiceImpl struct { @@ -75,8 +81,8 @@ func NewGitOpsValidationServiceImpl(Logger *zap.SugaredLogger, } } -func (impl *GitOpsValidationServiceImpl) GitOpsValidateDryRun(config *apiBean.GitOpsConfigDto) apiBean.DetailedErrorGitOpsConfigResponse { - if config.AllowCustomRepository { +func (impl *GitOpsValidationServiceImpl) GitOpsValidateDryRun(isArgoModuleInstalled bool, config *apiBean.GitOpsConfigDto) apiBean.DetailedErrorGitOpsConfigResponse { + if config.AllowCustomRepository || !isArgoModuleInstalled { return apiBean.DetailedErrorGitOpsConfigResponse{ ValidationSkipped: true, } @@ -96,11 +102,12 @@ func (impl *GitOpsValidationServiceImpl) GitOpsValidateDryRun(config *apiBean.Gi detailedErrorGitOpsConfigResponse := impl.convertDetailedErrorToResponse(detailedErrorGitOpsConfigActions) return detailedErrorGitOpsConfigResponse } - appName := gitOpsBean.DryrunRepoName + util2.Generate(6) + appName := gitOpsBean.DryrunRepoName + globalUtil.Generate(6) //getting user name & emailId for commit author data userEmailId, userName := impl.gitOpsConfigReadService.GetUserEmailIdAndNameForGitOpsCommit(config.UserId) config.UserEmailId = userEmailId config.GitRepoName = appName + config.TargetRevision = globalUtil.GetDefaultTargetRevision() ctx := context.Background() repoUrl, _, _, detailedErrorCreateRepo := client.CreateRepository(ctx, config) @@ -126,7 +133,7 @@ func (impl *GitOpsValidationServiceImpl) GitOpsValidateDryRun(config *apiBean.Gi chartDir := fmt.Sprintf("%s-%s", appName, impl.chartTemplateService.GetDir()) clonedDir := gitService.GetCloneDirectory(chartDir) if _, err := os.Stat(clonedDir); os.IsNotExist(err) { - clonedDir, err = gitService.Clone(repoUrl, chartDir) + clonedDir, err = gitService.Clone(repoUrl, chartDir, config.TargetRevision) if err != nil { impl.logger.Errorw("error in cloning repo", "url", repoUrl, "err", err) detailedErrorGitOpsConfigActions.StageErrorMap[gitOpsBean.CloneStage] = err @@ -135,7 +142,7 @@ func (impl *GitOpsValidationServiceImpl) GitOpsValidateDryRun(config *apiBean.Gi } } - commit, err := 
gitService.CommitAndPushAllChanges(ctx, clonedDir, "first commit", userName, userEmailId) + commit, err := gitService.CommitAndPushAllChanges(ctx, clonedDir, config.TargetRevision, "first commit", userName, userEmailId) if err != nil { impl.logger.Errorw("error in commit and pushing git", "err", err) if commit == "" { @@ -162,16 +169,39 @@ func (impl *GitOpsValidationServiceImpl) GitOpsValidateDryRun(config *apiBean.Gi return detailedErrorGitOpsConfigResponse } -func (impl *GitOpsValidationServiceImpl) ValidateCustomGitRepoURL(request gitOpsBean.ValidateCustomGitRepoURLRequest) (string, bool, error) { +func (impl *GitOpsValidationServiceImpl) ValidateGitOpsRepoUrl(request *gitOpsBean.ValidateGitOpsRepoUrlRequest) (string, error) { + // Validate: Organisational URL starts + sanitiseGitRepoUrl, err := impl.validateForGitOpsOrg(request) + if err != nil { + impl.logger.Errorw("non-organisational custom gitops repo validation error", "err", err) + return sanitiseGitRepoUrl, err + } + // Validate: Organisational URL ends + + // Validate: Unique GitOps repository URL starts + isValid := impl.validateUniqueGitOpsRepo(sanitiseGitRepoUrl) + if !isValid { + impl.logger.Errorw("git repo url already exists", "repo url", request.RequestedGitUrl) + errMsg := fmt.Sprintf("invalid git repository! '%s' is already in use by another application! Use a different repository", request.RequestedGitUrl) + return sanitiseGitRepoUrl, util.NewApiError(http.StatusBadRequest, errMsg, errMsg). + WithCode(constants.InvalidGitOpsRepoUrlForPipeline) + } + // Validate: Unique GitOps repository URL ends + return sanitiseGitRepoUrl, nil +} + +func (impl *GitOpsValidationServiceImpl) ValidateCustomGitOpsConfig(request gitOpsBean.ValidateGitOpsRepoRequest) (string, bool, error) { gitOpsRepoName := "" if request.GitRepoURL == apiBean.GIT_REPO_DEFAULT || len(request.GitRepoURL) == 0 { gitOpsRepoName = impl.gitOpsConfigReadService.GetGitOpsRepoName(request.AppName) } else { gitOpsRepoName = impl.gitOpsConfigReadService.GetGitOpsRepoNameFromUrl(request.GitRepoURL) } - + if len(request.TargetRevision) == 0 { + request.TargetRevision = globalUtil.GetDefaultTargetRevision() + } // CreateGitRepositoryForDevtronApp will try to create repository if not present, and returns a sanitized repo url, use this repo url to maintain uniformity - chartGitAttribute, err := impl.gitOperationService.CreateGitRepositoryForDevtronApp(context.Background(), gitOpsRepoName, request.UserId) + chartGitAttribute, err := impl.gitOperationService.CreateGitRepositoryForDevtronApp(context.Background(), gitOpsRepoName, request.TargetRevision, request.UserId) if err != nil { impl.logger.Errorw("error in validating custom gitops repo", "err", err) return "", false, impl.extractErrorMessageByProvider(err, request.GitOpsProvider) @@ -180,35 +210,78 @@ func (impl *GitOpsValidationServiceImpl) ValidateCustomGitRepoURL(request gitOps if request.GitRepoURL != apiBean.GIT_REPO_DEFAULT && len(request.GitRepoURL) != 0 { // For custom git repo; we expect the chart is not present hence setting isNew flag to be true. 
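
Note: ValidateCustomGitOpsConfig now funnels both the organisational and the uniqueness checks through the new ValidateGitOpsRepoUrl. A minimal usage sketch under the assumptions visible in this diff (checkUserRepo is hypothetical):

    package validation

    import (
        gitOpsBean "github.com/devtron-labs/devtron/pkg/deployment/gitOps/validation/bean"
    )

    // checkUserRepo validates a user-supplied repo URL: the organisational-URL
    // check runs first, then the uniqueness check; both failures come back as
    // ApiErrors coded InvalidGitOpsRepoUrlForPipeline.
    func checkUserRepo(svc GitOpsValidationService, userUrl string) (string, error) {
        req := &gitOpsBean.ValidateGitOpsRepoUrlRequest{
            RequestedGitUrl: userUrl,
            UseActiveGitOps: true, // resolve against the active GitOps provider
        }
        // Callers should persist the sanitised URL that comes back, not the raw input.
        return svc.ValidateGitOpsRepoUrl(req)
    }
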
chartGitAttribute.IsNewRepo = true - - // Validate: Organisational URL starts - activeGitOpsConfig, err := impl.gitOpsConfigReadService.GetGitOpsConfigActive() - if err != nil { - impl.logger.Errorw("error in fetching active gitOps config", "err", err) - return "", false, err + validateGitRepoRequest := &gitOpsBean.ValidateGitOpsRepoUrlRequest{ + RequestedGitUrl: request.GitRepoURL, + DesiredGitUrl: chartGitAttribute.RepoUrl, + UseActiveGitOps: true, } - repoUrl := git.SanitiseCustomGitRepoURL(*activeGitOpsConfig, request.GitRepoURL) - orgRepoUrl := strings.TrimSuffix(chartGitAttribute.RepoUrl, ".git") - if !strings.Contains(strings.ToLower(repoUrl), strings.ToLower(orgRepoUrl)) { - impl.logger.Errorw("non-organisational custom gitops repo", "expected repo", chartGitAttribute.RepoUrl, "user given repo", repoUrl) - nonOrgErr := impl.getValidationErrorForNonOrganisationalURL(*activeGitOpsConfig) - if nonOrgErr != nil { - impl.logger.Errorw("non-organisational custom gitops repo validation error", "err", err) - return "", false, nonOrgErr - } + _, validationErr := impl.ValidateGitOpsRepoUrl(validateGitRepoRequest) + if validationErr != nil { + impl.logger.Errorw("error in validating gitops repo url", "err", validationErr) + return "", false, validationErr } - // Validate: Organisational URL ends } + return chartGitAttribute.RepoUrl, chartGitAttribute.IsNewRepo, nil +} - // Validate: Unique GitOps repository URL starts - isValid := impl.validateUniqueGitOpsRepo(chartGitAttribute.RepoUrl) - if !isValid { - impl.logger.Errorw("git repo url already exists", "repo url", chartGitAttribute.RepoUrl) - return "", false, fmt.Errorf("invalid git repository! '%s' is already in use by another application! Use a different repository", chartGitAttribute.RepoUrl) +func (impl *GitOpsValidationServiceImpl) getDesiredGitRepoUrl(request *gitOpsBean.ValidateGitOpsRepoUrlRequest, gitOpsConfig *apiBean.GitOpsConfigDto) (string, error) { + if len(request.DesiredGitUrl) != 0 { + return request.DesiredGitUrl, nil } - // Validate: Unique GitOps repository URL ends + client, _, clientErr := impl.gitFactory.NewClientForValidation(gitOpsConfig) + if clientErr != nil { + impl.logger.Errorw("error in creating new client for validation", "clientErr", clientErr, "request", request) + return "", clientErr + } + gitOpsConfig.GitRepoName = impl.gitOpsConfigReadService.GetGitOpsRepoNameFromUrl(request.RequestedGitUrl) + desiredRepoUrl, _, err := client.GetRepoUrl(gitOpsConfig) + if err != nil { + impl.logger.Errorw("error in getting repo url", "err", err, "request", request) + return "", err + } + return desiredRepoUrl, nil +} - return chartGitAttribute.RepoUrl, chartGitAttribute.IsNewRepo, nil +func (impl *GitOpsValidationServiceImpl) getMatchedGitopsConfig(request *gitOpsBean.ValidateGitOpsRepoUrlRequest) (*apiBean.GitOpsConfigDto, error) { + if request.UseActiveGitOps { + matchedGitopsConfig, err := impl.gitOpsConfigReadService.GetGitOpsConfigActive() + if err != nil { + impl.logger.Errorw("error in fetching active gitOps provider", "err", err) + return nil, err + } + return matchedGitopsConfig, err + } + matchedGitopsConfig, err := impl.gitOpsConfigReadService.GetGitOpsProviderByRepoURL(request.RequestedGitUrl) + if err != nil { + impl.logger.Errorw("error in fetching gitOps provider by repo url", "err", err) + return nil, err + } + return matchedGitopsConfig, err +} + +func (impl *GitOpsValidationServiceImpl) validateForGitOpsOrg(request *gitOpsBean.ValidateGitOpsRepoUrlRequest) (string, error) { + matchedGitopsConfig, err 
:= impl.getMatchedGitopsConfig(request) + if err != nil { + impl.logger.Errorw("error in getting matched gitops config", "err", err, "request", request) + errMsg := fmt.Sprintf("error in getting matched gitops config: %s", err.Error()) + return "", util.NewApiError(http.StatusBadRequest, errMsg, errMsg). + WithCode(constants.InvalidGitOpsRepoUrlForPipeline) + } + desiredRepoUrl, gitErr := impl.getDesiredGitRepoUrl(request, matchedGitopsConfig) + if gitErr != nil { + impl.logger.Errorw("error in getting desired git repo url", "err", gitErr, "request", request) + errMsg := fmt.Sprintf("error in getting desired git repo url: %s", gitErr.Error()) + return "", util.NewApiError(http.StatusBadRequest, errMsg, errMsg). + WithCode(constants.InvalidGitOpsRepoUrlForPipeline) + } + sanitiseGitRepoUrl := git.SanitiseCustomGitRepoURL(matchedGitopsConfig, request.RequestedGitUrl) + orgRepoUrl := strings.TrimSuffix(desiredRepoUrl, ".git") + if !strings.Contains(strings.ToLower(sanitiseGitRepoUrl), strings.ToLower(orgRepoUrl)) { + // If the repo is non-organizational, then return error + impl.logger.Debugw("non-organisational custom gitops repo", "expected repo", desiredRepoUrl, "user given repo", sanitiseGitRepoUrl, "request", request) + return "", impl.getValidationErrorForNonOrganisationalURL(matchedGitopsConfig) + } + return desiredRepoUrl, nil } func (impl *GitOpsValidationServiceImpl) extractErrorMessageByProvider(err error, provider string) error { @@ -248,7 +321,7 @@ func (impl *GitOpsValidationServiceImpl) convertDetailedErrorToResponse(detailed return detailedErrorResponse } -func (impl *GitOpsValidationServiceImpl) getValidationErrorForNonOrganisationalURL(activeGitOpsConfig apiBean.GitOpsConfigDto) error { +func (impl *GitOpsValidationServiceImpl) getValidationErrorForNonOrganisationalURL(activeGitOpsConfig *apiBean.GitOpsConfigDto) error { var errorMessageKey, errorMessage string switch strings.ToUpper(activeGitOpsConfig.Provider) { case git.GITHUB_PROVIDER: @@ -267,7 +340,9 @@ func (impl *GitOpsValidationServiceImpl) getValidationErrorForNonOrganisationalU errorMessageKey = "The repository must belong to Azure DevOps Project" errorMessage = fmt.Sprintf("%s as configured in global configurations > GitOps", activeGitOpsConfig.AzureProjectName) } - return fmt.Errorf("%s: %s", errorMessageKey, errorMessage) + apiErrorMsg := fmt.Sprintf("%s: %s", errorMessageKey, errorMessage) + return util.NewApiError(http.StatusBadRequest, apiErrorMsg, apiErrorMsg). 
+ WithCode(constants.InvalidGitOpsRepoUrlForPipeline) } func (impl *GitOpsValidationServiceImpl) validateUniqueGitOpsRepo(repoUrl string) (isValid bool) { diff --git a/pkg/deployment/manifest/ManifestCreationService.go b/pkg/deployment/manifest/ManifestCreationService.go index ad8986af87..533bf7ed0c 100644 --- a/pkg/deployment/manifest/ManifestCreationService.go +++ b/pkg/deployment/manifest/ManifestCreationService.go @@ -36,6 +36,7 @@ import ( chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" repository2 "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" "github.com/devtron-labs/devtron/pkg/deployment/common" + deploymentBean "github.com/devtron-labs/devtron/pkg/deployment/common/bean" bean3 "github.com/devtron-labs/devtron/pkg/deployment/manifest/bean" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate" @@ -67,11 +68,10 @@ import ( ) type ManifestCreationService interface { - BuildManifestForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, - ctx context.Context) (valuesOverrideResponse *app.ValuesOverrideResponse, builtChartPath string, err error) + BuildManifestForTrigger(ctx context.Context, overrideRequest *bean.ValuesOverrideRequest, envDeploymentConfig *deploymentBean.DeploymentConfig, triggeredAt time.Time) (valuesOverrideResponse *app.ValuesOverrideResponse, builtChartPath string, err error) //TODO: remove below method - GetValuesOverrideForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*app.ValuesOverrideResponse, error) + GetValuesOverrideForTrigger(ctx context.Context, overrideRequest *bean.ValuesOverrideRequest, envDeploymentConfig *deploymentBean.DeploymentConfig, triggeredAt time.Time) (*app.ValuesOverrideResponse, error) } type ManifestCreationServiceImpl struct { @@ -155,15 +155,15 @@ func NewManifestCreationServiceImpl(logger *zap.SugaredLogger, } } -func (impl *ManifestCreationServiceImpl) BuildManifestForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, - ctx context.Context) (valuesOverrideResponse *app.ValuesOverrideResponse, builtChartPath string, err error) { - valuesOverrideResponse = &app.ValuesOverrideResponse{} - valuesOverrideResponse, err = impl.GetValuesOverrideForTrigger(overrideRequest, triggeredAt, ctx) +func (impl *ManifestCreationServiceImpl) BuildManifestForTrigger(ctx context.Context, overrideRequest *bean.ValuesOverrideRequest, + envDeploymentConfig *deploymentBean.DeploymentConfig, triggeredAt time.Time) (valuesOverrideResponse *app.ValuesOverrideResponse, builtChartPath string, err error) { + valuesOverrideResponse, err = impl.GetValuesOverrideForTrigger(ctx, overrideRequest, envDeploymentConfig, triggeredAt) if err != nil { impl.logger.Errorw("error in fetching values for trigger", "err", err) return valuesOverrideResponse, "", err } - builtChartPath, err = impl.deploymentTemplateService.BuildChartAndGetPath(overrideRequest.AppName, valuesOverrideResponse.EnvOverride, ctx) + valuesOverrideResponse.DeploymentConfig = envDeploymentConfig + builtChartPath, err = impl.deploymentTemplateService.BuildChartAndGetPath(overrideRequest.AppName, valuesOverrideResponse.EnvOverride, envDeploymentConfig, ctx) if err != nil { impl.logger.Errorw("error in parsing reference chart", "err", err) return 
valuesOverrideResponse, "", err @@ -171,7 +171,8 @@ func (impl *ManifestCreationServiceImpl) BuildManifestForTrigger(overrideRequest return valuesOverrideResponse, builtChartPath, err } -func (impl *ManifestCreationServiceImpl) GetValuesOverrideForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*app.ValuesOverrideResponse, error) { +func (impl *ManifestCreationServiceImpl) GetValuesOverrideForTrigger(ctx context.Context, overrideRequest *bean.ValuesOverrideRequest, + envDeploymentConfig *deploymentBean.DeploymentConfig, triggeredAt time.Time) (*app.ValuesOverrideResponse, error) { newCtx, span := otel.Tracer("orchestrator").Start(ctx, "ManifestCreationServiceImpl.GetValuesOverrideForTrigger") defer span.End() helper.ResolveDeploymentTypeAndUpdate(overrideRequest) @@ -284,7 +285,7 @@ func (impl *ManifestCreationServiceImpl) GetValuesOverrideForTrigger(overrideReq // error is not returned as it's not blocking for deployment process // blocking deployments based on this use case can vary for user to user } - mergedValues, err = impl.autoscalingCheckBeforeTrigger(newCtx, appName, envOverride.Namespace, mergedValues, overrideRequest) + mergedValues, err = impl.autoscalingCheckBeforeTrigger(newCtx, appName, envOverride.Namespace, mergedValues, overrideRequest, envDeploymentConfig) if err != nil { impl.logger.Errorw("error in autoscaling check before trigger", "pipelineId", overrideRequest.PipelineId, "err", err) return valuesOverrideResponse, err @@ -1014,12 +1015,12 @@ func (impl *ManifestCreationServiceImpl) updatedExternalCmCsHashForTrigger(ctx c return merged, nil } -func (impl *ManifestCreationServiceImpl) autoscalingCheckBeforeTrigger(ctx context.Context, appName string, namespace string, merged []byte, overrideRequest *bean.ValuesOverrideRequest) ([]byte, error) { +func (impl *ManifestCreationServiceImpl) autoscalingCheckBeforeTrigger(ctx context.Context, appName string, namespace string, merged []byte, + overrideRequest *bean.ValuesOverrideRequest, envDeploymentConfig *deploymentBean.DeploymentConfig) ([]byte, error) { newCtx, span := otel.Tracer("orchestrator").Start(ctx, "ManifestCreationServiceImpl.autoscalingCheckBeforeTrigger") defer span.End() pipelineId := overrideRequest.PipelineId - var appDeploymentType = overrideRequest.DeploymentAppType - var clusterId = overrideRequest.ClusterId + clusterId := overrideRequest.ClusterId deploymentType := overrideRequest.DeploymentType templateMap := make(map[string]interface{}) err := json.Unmarshal(merged, &templateMap) @@ -1032,7 +1033,7 @@ func (impl *ManifestCreationServiceImpl) autoscalingCheckBeforeTrigger(ctx conte impl.logger.Debugw("autoscalingCheckBeforeTrigger", "pipelineId", pipelineId, "hpaResourceRequest", hpaResourceRequest) if hpaResourceRequest.IsEnable { var resourceManifest map[string]interface{} - if util.IsAcdApp(appDeploymentType) { + if envDeploymentConfig.IsArgoCdClientSupported() { resourceManifest, err = impl.getArgoCdHPAResourceManifest(newCtx, appName, namespace, hpaResourceRequest) if err != nil { return merged, err diff --git a/pkg/deployment/manifest/deploymentTemplate/DeploymentTemplateService.go b/pkg/deployment/manifest/deploymentTemplate/DeploymentTemplateService.go index 16bd9d3912..211781d55c 100644 --- a/pkg/deployment/manifest/deploymentTemplate/DeploymentTemplateService.go +++ b/pkg/deployment/manifest/deploymentTemplate/DeploymentTemplateService.go @@ -20,7 +20,10 @@ import ( "context" "fmt" 
"github.com/devtron-labs/devtron/internal/util" + bean2 "github.com/devtron-labs/devtron/pkg/auth/user/bean" chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" + "github.com/devtron-labs/devtron/pkg/deployment/common" + bean9 "github.com/devtron-labs/devtron/pkg/deployment/common/bean" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/bean" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef" bean4 "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/bean" @@ -34,7 +37,7 @@ import ( ) type DeploymentTemplateService interface { - BuildChartAndGetPath(appName string, envOverride *bean.EnvConfigOverride, ctx context.Context) (string, error) + BuildChartAndGetPath(appName string, envOverride *bean.EnvConfigOverride, envDeploymentConfig *bean9.DeploymentConfig, ctx context.Context) (string, error) } type DeploymentTemplateServiceImpl struct { @@ -42,25 +45,30 @@ type DeploymentTemplateServiceImpl struct { chartRefService chartRef.ChartRefService chartTemplateService util.ChartTemplateService - chartRepository chartRepoRepository.ChartRepository + chartRepository chartRepoRepository.ChartRepository + deploymentConfigService common.DeploymentConfigService } func NewDeploymentTemplateServiceImpl(logger *zap.SugaredLogger, chartRefService chartRef.ChartRefService, chartTemplateService util.ChartTemplateService, - chartRepository chartRepoRepository.ChartRepository) *DeploymentTemplateServiceImpl { + chartRepository chartRepoRepository.ChartRepository, + deploymentConfigService common.DeploymentConfigService) *DeploymentTemplateServiceImpl { return &DeploymentTemplateServiceImpl{ - logger: logger, - chartRefService: chartRefService, - chartTemplateService: chartTemplateService, - chartRepository: chartRepository, + logger: logger, + chartRefService: chartRefService, + chartTemplateService: chartTemplateService, + chartRepository: chartRepository, + deploymentConfigService: deploymentConfigService, } } -func (impl *DeploymentTemplateServiceImpl) BuildChartAndGetPath(appName string, envOverride *bean.EnvConfigOverride, ctx context.Context) (string, error) { - if !strings.HasSuffix(envOverride.Chart.ChartLocation, fmt.Sprintf("%s%s", "/", envOverride.Chart.ChartVersion)) { +func (impl *DeploymentTemplateServiceImpl) BuildChartAndGetPath(appName string, envOverride *bean.EnvConfigOverride, envDeploymentConfig *bean9.DeploymentConfig, ctx context.Context) (string, error) { + if !envDeploymentConfig.IsLinkedRelease() && + (!strings.HasSuffix(envOverride.Chart.ChartLocation, fmt.Sprintf("%s%s", "/", envOverride.Chart.ChartVersion)) || + !strings.HasSuffix(envDeploymentConfig.GetChartLocation(), fmt.Sprintf("%s%s", "/", envOverride.Chart.ChartVersion))) { _, span := otel.Tracer("orchestrator").Start(ctx, "autoHealChartLocationInChart") - err := impl.autoHealChartLocationInChart(ctx, envOverride) + err := impl.autoHealChartLocationInChart(ctx, envOverride, envDeploymentConfig) span.End() if err != nil { return "", err @@ -98,7 +106,7 @@ func (impl *DeploymentTemplateServiceImpl) BuildChartAndGetPath(appName string, return tempReferenceTemplateDir, nil } -func (impl *DeploymentTemplateServiceImpl) autoHealChartLocationInChart(ctx context.Context, envOverride *bean.EnvConfigOverride) error { +func (impl *DeploymentTemplateServiceImpl) 
autoHealChartLocationInChart(ctx context.Context, envOverride *bean.EnvConfigOverride, envDeploymentConfig *bean9.DeploymentConfig) error { chartId := envOverride.Chart.Id impl.logger.Infow("auto-healing: Chart location in chart not correct. modifying ", "chartId", chartId, "current chartLocation", envOverride.Chart.ChartLocation, "current chartVersion", envOverride.Chart.ChartVersion) @@ -138,5 +146,14 @@ func (impl *DeploymentTemplateServiceImpl) autoHealChartLocationInChart(ctx cont // update newChartLocation in model envOverride.Chart.ChartLocation = newChartLocation + + //TODO: Ayush review + envDeploymentConfig.SetChartLocation(newChartLocation) + envDeploymentConfig, err = impl.deploymentConfigService.CreateOrUpdateConfig(nil, envDeploymentConfig, bean2.SystemUserId) + if err != nil { + impl.logger.Errorw("error occurred while creating or updating config", "appId", chart.AppId, "err", err) + return err + } + return nil } diff --git a/pkg/deployment/manifest/deploymentTemplate/bean/chartEnvConfigOverride.go b/pkg/deployment/manifest/deploymentTemplate/bean/chartEnvConfigOverride.go index a1debfefc7..20ff31154b 100644 --- a/pkg/deployment/manifest/deploymentTemplate/bean/chartEnvConfigOverride.go +++ b/pkg/deployment/manifest/deploymentTemplate/bean/chartEnvConfigOverride.go @@ -36,3 +36,7 @@ type EnvConfigOverride struct { //ResolvedEnvOverrideValuesForCS string VariableSnapshotForCS map[string]string } + +func (e *EnvConfigOverride) IsOverridden() bool { + return e != nil && e.Id != 0 && e.IsOverride +} diff --git a/pkg/deployment/manifest/deploymentTemplate/chartRef/ChartRefService.go b/pkg/deployment/manifest/deploymentTemplate/chartRef/ChartRefService.go index 8dde120246..7ac9d8915e 100644 --- a/pkg/deployment/manifest/deploymentTemplate/chartRef/ChartRefService.go +++ b/pkg/deployment/manifest/deploymentTemplate/chartRef/ChartRefService.go @@ -27,6 +27,7 @@ import ( chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/adapter" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/bean" + "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/read" util2 "github.com/devtron-labs/devtron/util" dirCopy "github.com/otiai10/copy" "go.uber.org/zap" @@ -80,6 +81,7 @@ type ChartRefFileOpService interface { type ChartRefServiceImpl struct { logger *zap.SugaredLogger chartRefRepository chartRepoRepository.ChartRefRepository + chartRefReadService read.ChartRefReadService chartTemplateService util.ChartTemplateService mergeUtil util.MergeUtil chartRepository chartRepoRepository.ChartRepository @@ -87,6 +89,7 @@ type ChartRefServiceImpl struct { func NewChartRefServiceImpl(logger *zap.SugaredLogger, chartRefRepository chartRepoRepository.ChartRefRepository, + chartRefReadService read.ChartRefReadService, chartTemplateService util.ChartTemplateService, chartRepository chartRepoRepository.ChartRepository, mergeUtil util.MergeUtil) *ChartRefServiceImpl { @@ -96,6 +99,7 @@ func NewChartRefServiceImpl(logger *zap.SugaredLogger, return &ChartRefServiceImpl{ logger: logger, chartRefRepository: chartRefRepository, + chartRefReadService: chartRefReadService, chartTemplateService: chartTemplateService, mergeUtil: mergeUtil, chartRepository: chartRepository, @@ -160,21 +164,11 @@ func (impl *ChartRefServiceImpl) 
ChartRefIdsCompatible(oldChartRefId int, newCha } func (impl *ChartRefServiceImpl) FindById(chartRefId int) (*bean.ChartRefDto, error) { - chartRef, err := impl.chartRefRepository.FindById(chartRefId) - if err != nil { - impl.logger.Errorw("error in getting chartRef by id", "err", err, "chartRefId", chartRefId) - return nil, err - } - return adapter.ConvertChartRefDbObjToBean(chartRef), nil + return impl.chartRefReadService.FindById(chartRefId) } func (impl *ChartRefServiceImpl) FindByVersionAndName(version, name string) (*bean.ChartRefDto, error) { - chartRef, err := impl.chartRefRepository.FindByVersionAndName(name, version) - if err != nil { - impl.logger.Errorw("error in getting chartRef by version and name", "err", err, "version", version, "name", name) - return nil, err - } - return adapter.ConvertChartRefDbObjToBean(chartRef), nil + return impl.chartRefReadService.FindByVersionAndName(version, name) } func (impl *ChartRefServiceImpl) FetchInfoOfChartConfiguredInApp(appId int) (*bean.ChartRefDto, error) { diff --git a/pkg/deployment/manifest/deploymentTemplate/chartRef/bean/bean.go b/pkg/deployment/manifest/deploymentTemplate/chartRef/bean/bean.go index 562dd3c204..a6c7866600 100644 --- a/pkg/deployment/manifest/deploymentTemplate/chartRef/bean/bean.go +++ b/pkg/deployment/manifest/deploymentTemplate/chartRef/bean/bean.go @@ -16,13 +16,31 @@ package bean -import "github.com/devtron-labs/devtron/pkg/sql" +import ( + "github.com/devtron-labs/devtron/pkg/sql" +) const ( - DeploymentChartType = "Deployment" - RolloutChartType = "Rollout Deployment" - ReferenceChart = "reference-chart" - RefChartDirPath = "scripts/devtron-reference-helm-charts" + DeploymentChartType = "Deployment" + DeploymentChartNamePrefix = "deployment-chart_" + + WorkflowChartType = "workflow-chart" + WorkflowChartNamePrefix = "workflow-chart_" + + KnativeChartType = "Knative" + KnativeChartNamePrefix = "knative-chart_" + + StatefulSetChartType = "StatefulSet" + StatefulSetChartNamePrefix = "statefulset-chart_" + + JobAndCronJobType = "Job & CronJob" + JobAndCronJobNamePrefix = "cronjob-chart_" + + RolloutChartType = "Rollout Deployment" + RolloutChartNamePrefix = "reference-chart_" + ReferenceChart = "reference-chart" + RefChartDirPath = "scripts/devtron-reference-helm-charts" + ChartAlreadyExistsInternalError = "Chart exists already, try uploading another chart" ChartNameReservedInternalError = "Change the name of the chart and try uploading again" ) diff --git a/pkg/deployment/manifest/deploymentTemplate/chartRef/read/chartRefReadService.go b/pkg/deployment/manifest/deploymentTemplate/chartRef/read/chartRefReadService.go new file mode 100644 index 0000000000..4abf4e114a --- /dev/null +++ b/pkg/deployment/manifest/deploymentTemplate/chartRef/read/chartRefReadService.go @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package read + +import ( + chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" + "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/adapter" + "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/bean" + "go.uber.org/zap" +) + +type ChartRefReadService interface { + FindById(chartRefId int) (*bean.ChartRefDto, error) + FindByVersionAndName(version, name string) (*bean.ChartRefDto, error) +} + +type ChartRefReadServiceImpl struct { + logger *zap.SugaredLogger + chartRefRepository chartRepoRepository.ChartRefRepository +} + +func NewChartRefReadServiceImpl(logger *zap.SugaredLogger, + chartRefRepository chartRepoRepository.ChartRefRepository) *ChartRefReadServiceImpl { + return &ChartRefReadServiceImpl{ + logger: logger, + chartRefRepository: chartRefRepository, + } +} + +func (impl *ChartRefReadServiceImpl) FindById(chartRefId int) (*bean.ChartRefDto, error) { + chartRef, err := impl.chartRefRepository.FindById(chartRefId) + if err != nil { + impl.logger.Errorw("error in getting chartRef by id", "err", err, "chartRefId", chartRefId) + return nil, err + } + return adapter.ConvertChartRefDbObjToBean(chartRef), nil +} + +func (impl *ChartRefReadServiceImpl) FindByVersionAndName(version, name string) (*bean.ChartRefDto, error) { + chartRef, err := impl.chartRefRepository.FindByVersionAndName(name, version) + if err != nil { + impl.logger.Errorw("error in getting chartRef by version and name", "err", err, "version", version, "name", name) + return nil, err + } + return adapter.ConvertChartRefDbObjToBean(chartRef), nil +} diff --git a/pkg/deployment/manifest/deploymentTemplate/read/chartEnvConfigOverride.go b/pkg/deployment/manifest/deploymentTemplate/read/chartEnvConfigOverride.go index 92c7574ebd..4b105d14b3 100644 --- a/pkg/deployment/manifest/deploymentTemplate/read/chartEnvConfigOverride.go +++ b/pkg/deployment/manifest/deploymentTemplate/read/chartEnvConfigOverride.go @@ -18,6 +18,7 @@ type EnvConfigOverrideService interface { FindChartByAppIdAndEnvIdAndChartRefId(appId, targetEnvironmentId int, chartRefId int) (*bean.EnvConfigOverride, error) FindChartForAppByAppIdAndEnvId(appId, targetEnvironmentId int) (*bean.EnvConfigOverride, error) GetByAppIdEnvIdAndChartRefId(appId, envId int, chartRefId int) (*bean.EnvConfigOverride, error) + GetAllOverridesForApp(appId int) ([]*bean.EnvConfigOverride, error) } type EnvConfigOverrideReadServiceImpl struct { @@ -130,3 +131,16 @@ func (impl EnvConfigOverrideReadServiceImpl) GetByAppIdEnvIdAndChartRefId(appId, } return adapter.EnvOverrideDBToDTO(overrideDBObj), nil } + +func (impl EnvConfigOverrideReadServiceImpl) GetAllOverridesForApp(appId int) ([]*bean.EnvConfigOverride, error) { + overrideDBObjs, err := impl.envConfigOverrideRepository.GetAllOverridesForApp(appId) + if err != nil { + impl.logger.Errorw("error in getting chart env config override", "appId", appId, "err", err) + return nil, err + } + envConfigOverrides := make([]*bean.EnvConfigOverride, 0, len(overrideDBObjs)) + for _, dbObj := range overrideDBObjs { + envConfigOverrides = append(envConfigOverrides, adapter.EnvOverrideDBToDTO(&dbObj)) + } + return envConfigOverrides, nil +} diff --git a/pkg/deployment/manifest/deploymentTemplate/wire_deploymentTemplate.go b/pkg/deployment/manifest/deploymentTemplate/wire_deploymentTemplate.go index 33b6db0a34..f8df37c0c0 100644 ---
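
Note: ChartRefServiceImpl.FindById and FindByVersionAndName (above) now delegate to this read service, so read-only consumers can depend on the narrower interface instead of the full ChartRefService. A minimal consumer sketch (fetchChartRef is hypothetical):

    package read

    // fetchChartRef shows the intended dependency direction: take the
    // ChartRefReadService, not the repository or the full write-side service.
    func fetchChartRef(reader ChartRefReadService, chartRefId int) error {
        dto, err := reader.FindById(chartRefId)
        if err != nil {
            return err // the read service has already logged the failure
        }
        _ = dto // use the *bean.ChartRefDto as needed
        return nil
    }
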
a/pkg/deployment/manifest/deploymentTemplate/wire_deploymentTemplate.go +++ b/pkg/deployment/manifest/deploymentTemplate/wire_deploymentTemplate.go @@ -18,6 +18,7 @@ package deploymentTemplate import ( "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef" + chartRefRead "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/read" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/read" "github.com/google/wire" ) @@ -31,6 +32,10 @@ var DeploymentTemplateWireSet = wire.NewSet( NewDeploymentTemplateValidationServiceImpl, wire.Bind(new(DeploymentTemplateValidationService), new(*DeploymentTemplateValidationServiceImpl)), + + chartRefRead.NewChartRefReadServiceImpl, + wire.Bind(new(chartRefRead.ChartRefReadService), new(*chartRefRead.ChartRefReadServiceImpl)), + chartRef.NewChartRefServiceImpl, wire.Bind(new(chartRef.ChartRefService), new(*chartRef.ChartRefServiceImpl)), read.NewDeploymentTemplateHistoryReadServiceImpl, diff --git a/pkg/deployment/manifest/publish/ManifestPushService.go b/pkg/deployment/manifest/publish/ManifestPushService.go index a1a99df0e4..6bdb30687a 100644 --- a/pkg/deployment/manifest/publish/ManifestPushService.go +++ b/pkg/deployment/manifest/publish/ManifestPushService.go @@ -34,6 +34,7 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/gitOps/git" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef" "github.com/devtron-labs/devtron/pkg/sql" + globalUtil "github.com/devtron-labs/devtron/util" "go.opentelemetry.io/otel" "go.uber.org/zap" "time" @@ -96,13 +97,17 @@ func (impl *GitOpsManifestPushServiceImpl) createRepoForGitOperation(manifestPus return manifestPushTemplate.RepoUrl, nil } gitOpsRepoName := impl.gitOpsConfigReadService.GetGitOpsRepoName(manifestPushTemplate.AppName) - chartGitAttr, err := impl.gitOperationService.CreateGitRepositoryForDevtronApp(ctx, gitOpsRepoName, manifestPushTemplate.UserId) + targetRevision := globalUtil.GetDefaultTargetRevision() + if len(manifestPushTemplate.TargetRevision) != 0 { + targetRevision = manifestPushTemplate.TargetRevision + } + chartGitAttr, err := impl.gitOperationService.CreateGitRepositoryForDevtronApp(ctx, gitOpsRepoName, targetRevision, manifestPushTemplate.UserId) if err != nil { impl.logger.Errorw("error in pushing chart to git ", "gitOpsRepoName", gitOpsRepoName, "err", err) return "", fmt.Errorf("No repository configured for Gitops! 
Error while creating git repository: '%s'", gitOpsRepoName) } chartGitAttr.ChartLocation = manifestPushTemplate.ChartLocation - err = impl.argoClientWrapperService.RegisterGitOpsRepoInArgoWithRetry(ctx, chartGitAttr.RepoUrl, manifestPushTemplate.UserId) + err = impl.argoClientWrapperService.RegisterGitOpsRepoInArgoWithRetry(ctx, chartGitAttr.RepoUrl, chartGitAttr.TargetRevision, manifestPushTemplate.UserId) if err != nil { impl.logger.Errorw("error in registering app in acd", "err", err) return "", fmt.Errorf("Error in registering repository '%s' in ArgoCd", gitOpsRepoName) @@ -110,13 +115,19 @@ func (impl *GitOpsManifestPushServiceImpl) createRepoForGitOperation(manifestPus return chartGitAttr.RepoUrl, nil } -func (impl *GitOpsManifestPushServiceImpl) validateManifestPushRequest(globalGitOpsConfigStatus gitOpsBean.GitOpsConfigurationStatus, manifestPushTemplate bean.ManifestPushTemplate) error { - if !globalGitOpsConfigStatus.IsGitOpsConfigured { - return fmt.Errorf("Gitops integration is not installed/configured. Please install/configure gitops.") - } - if gitOps.IsGitOpsRepoNotConfigured(manifestPushTemplate.RepoUrl) { - if globalGitOpsConfigStatus.AllowCustomRepository { - return fmt.Errorf("GitOps repository is not configured! Please configure gitops repository for application first.") +func (impl *GitOpsManifestPushServiceImpl) validateManifestPushRequest(globalGitOpsConfigStatus *gitOpsBean.GitOpsConfigurationStatus, manifestPushTemplate *bean.ManifestPushTemplate) error { + if manifestPushTemplate.ReleaseMode == util.PIPELINE_RELEASE_MODE_LINK { + if gitOps.IsGitOpsRepoNotConfigured(manifestPushTemplate.RepoUrl) { + return fmt.Errorf("Could not push chart to git. GitOps repository is not found for the pipeline.") + } + } else { + if !globalGitOpsConfigStatus.IsGitOpsConfiguredAndArgoCdInstalled() { + return fmt.Errorf("Gitops integration is not installed/configured. Please install/configure gitops.") + } + if gitOps.IsGitOpsRepoNotConfigured(manifestPushTemplate.RepoUrl) { + if globalGitOpsConfigStatus.AllowCustomRepository { + return fmt.Errorf("GitOps repository is not configured! Please configure gitops repository for application first.") + } } } return nil @@ -135,7 +146,7 @@ func (impl *GitOpsManifestPushServiceImpl) PushChart(ctx context.Context, manife return manifestPushResponse } // 2. 
Validate Repository for Git Operation - errMsg := impl.validateManifestPushRequest(*globalGitOpsConfigStatus, *manifestPushTemplate) + errMsg := impl.validateManifestPushRequest(globalGitOpsConfigStatus, manifestPushTemplate) if errMsg != nil { manifestPushResponse.Error = errMsg impl.SaveTimelineForError(manifestPushTemplate, errMsg) @@ -205,7 +216,7 @@ func (impl *GitOpsManifestPushServiceImpl) PushChart(ctx context.Context, manife } gitCommitTimeline := impl.pipelineStatusTimelineService.NewDevtronAppPipelineStatusTimelineDbObject(manifestPushTemplate.WorkflowRunnerId, timelineStatus.TIMELINE_STATUS_GIT_COMMIT, timelineStatus.TIMELINE_DESCRIPTION_ARGOCD_GIT_COMMIT, manifestPushTemplate.UserId) timelines := []*pipelineConfig.PipelineStatusTimeline{gitCommitTimeline} - if impl.acdConfig.IsManualSyncEnabled() { + if impl.acdConfig.IsManualSyncEnabled() && manifestPushTemplate.IsArgoSyncSupported { // if manual sync is enabled, add ARGOCD_SYNC_INITIATED_TIMELINE argoCDSyncInitiatedTimeline := impl.pipelineStatusTimelineService.NewDevtronAppPipelineStatusTimelineDbObject(manifestPushTemplate.WorkflowRunnerId, timelineStatus.TIMELINE_STATUS_ARGOCD_SYNC_INITIATED, timelineStatus.TIMELINE_DESCRIPTION_ARGOCD_SYNC_INITIATED, manifestPushTemplate.UserId) timelines = append(timelines, argoCDSyncInitiatedTimeline) @@ -235,7 +246,7 @@ func (impl *GitOpsManifestPushServiceImpl) pushChartToGitRepo(ctx context.Contex impl.logger.Errorw("err in getting chart info", "err", err) return err } - err = impl.gitOperationService.PushChartToGitRepo(newCtx, gitOpsRepoName, manifestPushTemplate.ChartReferenceTemplate, manifestPushTemplate.ChartVersion, manifestPushTemplate.BuiltChartPath, manifestPushTemplate.RepoUrl, manifestPushTemplate.UserId) + err = impl.gitOperationService.PushChartToGitRepo(newCtx, gitOpsRepoName, manifestPushTemplate.ChartLocation, manifestPushTemplate.BuiltChartPath, manifestPushTemplate.RepoUrl, manifestPushTemplate.TargetRevision, manifestPushTemplate.UserId) if err != nil { impl.logger.Errorw("error in pushing chart to git", "err", err) return err @@ -254,12 +265,13 @@ func (impl *GitOpsManifestPushServiceImpl) commitValuesToGit(ctx context.Context userEmailId, userName := impl.gitOpsConfigReadService.GetUserEmailIdAndNameForGitOpsCommit(manifestPushTemplate.UserId) span.End() chartGitAttr := &git.ChartConfig{ - FileName: fmt.Sprintf("_%d-values.yaml", manifestPushTemplate.TargetEnvironmentName), + FileName: manifestPushTemplate.ValuesFilePath, FileContent: manifestPushTemplate.MergedValues, ChartName: manifestPushTemplate.ChartName, ChartLocation: manifestPushTemplate.ChartLocation, ChartRepoName: chartRepoName, - ReleaseMessage: fmt.Sprintf("release-%d-env-%d ", manifestPushTemplate.PipelineOverrideId, manifestPushTemplate.TargetEnvironmentName), + TargetRevision: manifestPushTemplate.TargetRevision, + ReleaseMessage: fmt.Sprintf("release-%d-env-%d ", manifestPushTemplate.PipelineOverrideId, manifestPushTemplate.TargetEnvironmentId), UserName: userName, UserEmailId: userEmailId, } diff --git a/pkg/deployment/providerConfig/DeploymentTypeOverrideService.go b/pkg/deployment/providerConfig/DeploymentTypeOverrideService.go index 6fa6b8f9b0..62309ffb5a 100644 --- a/pkg/deployment/providerConfig/DeploymentTypeOverrideService.go +++ b/pkg/deployment/providerConfig/DeploymentTypeOverrideService.go @@ -17,11 +17,15 @@ package providerConfig import ( + "fmt" + "github.com/devtron-labs/devtron/internal/constants" util2 
"github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/attributes" "github.com/devtron-labs/devtron/util" "go.uber.org/zap" + "golang.org/x/exp/maps" "net/http" + "strings" ) type DeploymentTypeOverrideService interface { @@ -85,6 +89,7 @@ func (impl *DeploymentTypeOverrideServiceImpl) ValidateAndOverrideDeploymentAppT impl.logger.Errorw("GitOps not configured but selected as a deployment app type") err = &util2.ApiError{ HttpStatusCode: http.StatusBadRequest, + Code: constants.InvalidDeploymentAppTypeForPipeline, InternalMessage: "GitOps integration is not installed/configured. Please install/configure GitOps or use helm option.", UserMessage: "GitOps integration is not installed/configured. Please install/configure GitOps or use helm option.", } @@ -107,11 +112,12 @@ func (impl *DeploymentTypeOverrideServiceImpl) validateDeploymentAppType(deploym if validDeploymentConfigReceived(deploymentConfig, deploymentType) { return nil } - + errMsg := fmt.Sprintf("Deployment app type %q is not allowed for this environment. Allowed deployment app types are: %s", deploymentType, strings.Join(maps.Keys(deploymentConfig), ", ")) err := &util2.ApiError{ HttpStatusCode: http.StatusBadRequest, - InternalMessage: "Received deployment app type doesn't match with the allowed deployment app type for this environment.", - UserMessage: "Received deployment app type doesn't match with the allowed deployment app type for this environment.", + Code: constants.InvalidDeploymentAppTypeForPipeline, + InternalMessage: errMsg, + UserMessage: errMsg, } return err } diff --git a/pkg/deployment/trigger/devtronApps/TriggerService.go b/pkg/deployment/trigger/devtronApps/TriggerService.go index ca847d06e8..67fdbb71d7 100644 --- a/pkg/deployment/trigger/devtronApps/TriggerService.go +++ b/pkg/deployment/trigger/devtronApps/TriggerService.go @@ -110,7 +110,7 @@ type TriggerService interface { ManualCdTrigger(triggerContext bean.TriggerContext, overrideRequest *bean3.ValuesOverrideRequest) (int, string, *bean4.ManifestPushTemplate, error) TriggerAutomaticDeployment(request bean.TriggerRequest) error - TriggerRelease(overrideRequest *bean3.ValuesOverrideRequest, envDeploymentConfig *bean9.DeploymentConfig, ctx context.Context, triggeredAt time.Time, triggeredBy int32) (releaseNo int, manifestPushTemplate *bean4.ManifestPushTemplate, err error) + TriggerRelease(ctx context.Context, overrideRequest *bean3.ValuesOverrideRequest, envDeploymentConfig *bean9.DeploymentConfig, triggeredAt time.Time, triggeredBy int32) (releaseNo int, manifestPushTemplate *bean4.ManifestPushTemplate, err error) } type TriggerServiceImpl struct { @@ -760,11 +760,12 @@ func (impl *TriggerServiceImpl) releasePipeline(ctx context.Context, pipeline *p return err } -func (impl *TriggerServiceImpl) triggerAsyncRelease(userDeploymentRequestId int, overrideRequest *bean3.ValuesOverrideRequest, ctx context.Context, triggeredAt time.Time, deployedBy int32) (releaseNo int, manifestPushTemplate *bean4.ManifestPushTemplate, err error) { +func (impl *TriggerServiceImpl) triggerAsyncRelease(ctx context.Context, overrideRequest *bean3.ValuesOverrideRequest, + envDeploymentConfig *bean9.DeploymentConfig, userDeploymentRequestId int, triggeredAt time.Time, deployedBy int32) (releaseNo int, manifestPushTemplate *bean4.ManifestPushTemplate, err error) { newCtx, span := otel.Tracer("orchestrator").Start(ctx, "TriggerServiceImpl.triggerAsyncRelease") defer span.End() // build merged values and 
save PCO history for the release - valuesOverrideResponse, err := impl.manifestCreationService.GetValuesOverrideForTrigger(overrideRequest, triggeredAt, newCtx) + valuesOverrideResponse, err := impl.manifestCreationService.GetValuesOverrideForTrigger(newCtx, overrideRequest, envDeploymentConfig, triggeredAt) // auditDeploymentTriggerHistory is performed irrespective of GetValuesOverrideForTrigger error - for auditing purposes historyErr := impl.auditDeploymentTriggerHistory(overrideRequest.WfrId, valuesOverrideResponse, newCtx, triggeredAt, deployedBy) if historyErr != nil { @@ -814,11 +815,19 @@ func (impl *TriggerServiceImpl) handleCDTriggerRelease(ctx context.Context, over "forceSyncDeployment", overrideRequest.ForceSyncDeployment, "appId", overrideRequest.AppId, "envId", overrideRequest.EnvId) return userDeploymentRequestId, manifestPushTemplate, err } + if envDeploymentConfig.IsEmpty() { + deploymentConfig, dbErr := impl.deploymentConfigService.GetAndMigrateConfigIfAbsentForDevtronApps(overrideRequest.AppId, overrideRequest.EnvId) + if dbErr != nil { + impl.logger.Errorw("error in getting deployment config by appId and envId", "appId", overrideRequest.AppId, "envId", overrideRequest.EnvId, "err", dbErr) + return releaseNo, manifestPushTemplate, dbErr + } + envDeploymentConfig = deploymentConfig + } if isAsyncMode { - return impl.triggerAsyncRelease(userDeploymentRequestId, overrideRequest, newCtx, triggeredAt, deployedBy) + return impl.triggerAsyncRelease(newCtx, overrideRequest, envDeploymentConfig, userDeploymentRequestId, triggeredAt, deployedBy) } // synchronous mode of installation starts - return impl.TriggerRelease(overrideRequest, envDeploymentConfig, newCtx, triggeredAt, deployedBy) + return impl.TriggerRelease(newCtx, overrideRequest, envDeploymentConfig, triggeredAt, deployedBy) } func (impl *TriggerServiceImpl) auditDeploymentTriggerHistory(cdWfrId int, valuesOverrideResponse *app.ValuesOverrideResponse, ctx context.Context, triggeredAt time.Time, triggeredBy int32) (err error) { @@ -835,7 +844,8 @@ func (impl *TriggerServiceImpl) auditDeploymentTriggerHistory(cdWfrId int, value } // TriggerRelease will trigger Install/Upgrade request for Devtron App releases synchronously -func (impl *TriggerServiceImpl) TriggerRelease(overrideRequest *bean3.ValuesOverrideRequest, envDeploymentConfig *bean9.DeploymentConfig, ctx context.Context, triggeredAt time.Time, triggeredBy int32) (releaseNo int, manifestPushTemplate *bean4.ManifestPushTemplate, err error) { +func (impl *TriggerServiceImpl) TriggerRelease(ctx context.Context, overrideRequest *bean3.ValuesOverrideRequest, + envDeploymentConfig *bean9.DeploymentConfig, triggeredAt time.Time, triggeredBy int32) (releaseNo int, manifestPushTemplate *bean4.ManifestPushTemplate, err error) { newCtx, span := otel.Tracer("orchestrator").Start(ctx, "TriggerServiceImpl.TriggerRelease") defer span.End() triggerEvent, skipRequest, err := impl.buildTriggerEventForOverrideRequest(overrideRequest, triggeredAt) @@ -849,17 +859,7 @@ func (impl *TriggerServiceImpl) TriggerRelease(overrideRequest *bean3.ValuesOver return releaseNo, manifestPushTemplate, nil } // build merged values and save PCO history for the release - valuesOverrideResponse, builtChartPath, err := impl.manifestCreationService.BuildManifestForTrigger(overrideRequest, triggeredAt, newCtx) - - if envDeploymentConfig == nil || (envDeploymentConfig != nil && envDeploymentConfig.Id == 0) { - envDeploymentConfig, err1 := 
 
 func (impl *TriggerServiceImpl) auditDeploymentTriggerHistory(cdWfrId int, valuesOverrideResponse *app.ValuesOverrideResponse, ctx context.Context, triggeredAt time.Time, triggeredBy int32) (err error) {
@@ -835,7 +844,8 @@ func (impl *TriggerServiceImpl) auditDeploymentTriggerHistory(cdWfrId int, value
 }
 
 // TriggerRelease will trigger Install/Upgrade request for Devtron App releases synchronously
-func (impl *TriggerServiceImpl) TriggerRelease(overrideRequest *bean3.ValuesOverrideRequest, envDeploymentConfig *bean9.DeploymentConfig, ctx context.Context, triggeredAt time.Time, triggeredBy int32) (releaseNo int, manifestPushTemplate *bean4.ManifestPushTemplate, err error) {
+func (impl *TriggerServiceImpl) TriggerRelease(ctx context.Context, overrideRequest *bean3.ValuesOverrideRequest,
+	envDeploymentConfig *bean9.DeploymentConfig, triggeredAt time.Time, triggeredBy int32) (releaseNo int, manifestPushTemplate *bean4.ManifestPushTemplate, err error) {
 	newCtx, span := otel.Tracer("orchestrator").Start(ctx, "TriggerServiceImpl.TriggerRelease")
 	defer span.End()
 	triggerEvent, skipRequest, err := impl.buildTriggerEventForOverrideRequest(overrideRequest, triggeredAt)
@@ -849,17 +859,7 @@ func (impl *TriggerServiceImpl) TriggerRelease(overrideRequest *bean3.ValuesOver
 		return releaseNo, manifestPushTemplate, nil
 	}
 	// build merged values and save PCO history for the release
-	valuesOverrideResponse, builtChartPath, err := impl.manifestCreationService.BuildManifestForTrigger(overrideRequest, triggeredAt, newCtx)
-
-	if envDeploymentConfig == nil || (envDeploymentConfig != nil && envDeploymentConfig.Id == 0) {
-		envDeploymentConfig, err1 := impl.deploymentConfigService.GetAndMigrateConfigIfAbsentForDevtronApps(overrideRequest.AppId, overrideRequest.EnvId)
-		if err1 != nil {
-			impl.logger.Errorw("error in getting deployment config by appId and envId", "appId", overrideRequest.AppId, "envId", overrideRequest.EnvId, "err", err1)
-			return releaseNo, manifestPushTemplate, err1
-		}
-		valuesOverrideResponse.DeploymentConfig = envDeploymentConfig
-	}
-	valuesOverrideResponse.DeploymentConfig = envDeploymentConfig
+	valuesOverrideResponse, builtChartPath, err := impl.manifestCreationService.BuildManifestForTrigger(newCtx, overrideRequest, envDeploymentConfig, triggeredAt)
 
 	// auditDeploymentTriggerHistory is performed irrespective of BuildManifestForTrigger error - for auditing purposes
 	historyErr := impl.auditDeploymentTriggerHistory(overrideRequest.WfrId, valuesOverrideResponse, newCtx, triggeredAt, triggeredBy)
@@ -922,7 +922,7 @@ func (impl *TriggerServiceImpl) performGitOps(ctx context.Context,
 	}
 	if manifestPushResponse.IsNewGitRepoConfigured() {
 		// Update GitOps repo url after repo new repo created
-		valuesOverrideResponse.DeploymentConfig.RepoURL = manifestPushResponse.NewGitRepoUrl
+		valuesOverrideResponse.DeploymentConfig.SetRepoURL(manifestPushResponse.NewGitRepoUrl)
 	}
 	valuesOverrideResponse.ManifestPushTemplate = manifestPushTemplate
 	return nil
@@ -1003,17 +1003,17 @@ func (impl *TriggerServiceImpl) triggerPipeline(overrideRequest *bean3.ValuesOve
 
 func (impl *TriggerServiceImpl) buildManifestPushTemplate(overrideRequest *bean3.ValuesOverrideRequest, valuesOverrideResponse *app.ValuesOverrideResponse, builtChartPath string) (*bean4.ManifestPushTemplate, error) {
 	manifestPushTemplate := &bean4.ManifestPushTemplate{
-		WorkflowRunnerId:      overrideRequest.WfrId,
-		AppId:                 overrideRequest.AppId,
-		ChartRefId:            valuesOverrideResponse.EnvOverride.Chart.ChartRefId,
-		EnvironmentId:         valuesOverrideResponse.EnvOverride.Environment.Id,
-		EnvironmentName:       valuesOverrideResponse.EnvOverride.Environment.Namespace,
-		UserId:                overrideRequest.UserId,
-		PipelineOverrideId:    valuesOverrideResponse.PipelineOverride.Id,
-		AppName:               overrideRequest.AppName,
-		TargetEnvironmentName: valuesOverrideResponse.EnvOverride.TargetEnvironment,
-		BuiltChartPath:        builtChartPath,
-		MergedValues:          valuesOverrideResponse.MergedValues,
+		WorkflowRunnerId:    overrideRequest.WfrId,
+		AppId:               overrideRequest.AppId,
+		ChartRefId:          valuesOverrideResponse.EnvOverride.Chart.ChartRefId,
+		EnvironmentId:       valuesOverrideResponse.EnvOverride.Environment.Id,
+		EnvironmentName:     valuesOverrideResponse.EnvOverride.Environment.Namespace,
+		UserId:              overrideRequest.UserId,
+		PipelineOverrideId:  valuesOverrideResponse.PipelineOverride.Id,
+		AppName:             overrideRequest.AppName,
+		TargetEnvironmentId: valuesOverrideResponse.EnvOverride.TargetEnvironment,
+		BuiltChartPath:      builtChartPath,
+		MergedValues:        valuesOverrideResponse.MergedValues,
 	}
 
 	manifestPushConfig, err := impl.manifestPushConfigRepository.GetManifestPushConfigByAppIdAndEnvId(overrideRequest.AppId, overrideRequest.EnvId)
@@ -1036,9 +1036,13 @@ func (impl *TriggerServiceImpl) buildManifestPushTemplate(overrideRequest *bean3
 		manifestPushTemplate.ChartReferenceTemplate = valuesOverrideResponse.EnvOverride.Chart.ReferenceTemplate
 		manifestPushTemplate.ChartName = valuesOverrideResponse.EnvOverride.Chart.ChartName
 		manifestPushTemplate.ChartVersion = valuesOverrideResponse.EnvOverride.Chart.ChartVersion
-		manifestPushTemplate.ChartLocation = valuesOverrideResponse.EnvOverride.Chart.ChartLocation
-		manifestPushTemplate.RepoUrl = valuesOverrideResponse.DeploymentConfig.RepoURL
+		manifestPushTemplate.ChartLocation = valuesOverrideResponse.DeploymentConfig.GetChartLocation()
+		manifestPushTemplate.RepoUrl = valuesOverrideResponse.DeploymentConfig.GetRepoURL()
+		manifestPushTemplate.TargetRevision = valuesOverrideResponse.DeploymentConfig.GetTargetRevision()
+		manifestPushTemplate.ValuesFilePath = valuesOverrideResponse.DeploymentConfig.GetValuesFilePath()
+		manifestPushTemplate.ReleaseMode = valuesOverrideResponse.DeploymentConfig.ReleaseMode
 		manifestPushTemplate.IsCustomGitRepository = common.IsCustomGitOpsRepo(valuesOverrideResponse.DeploymentConfig.ConfigType)
+		manifestPushTemplate.IsArgoSyncSupported = valuesOverrideResponse.DeploymentConfig.IsArgoAppSyncAndRefreshSupported()
 	}
 	return manifestPushTemplate, nil
 }
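With the hunks above, buildManifestPushTemplate stops reading the chart location and repo URL off the chart row and instead asks the DeploymentConfig through getters, picking up target revision, values file path, release mode, and Argo sync support along the way. A rough sketch of the nil-safe getter pattern this relies on follows; the struct fields and the "master" default are assumptions for illustration, not the real bean9.DeploymentConfig layout.

package main

import "fmt"

// deploymentConfig is a stand-in for Devtron's DeploymentConfig bean;
// field names here are illustrative only.
type deploymentConfig struct {
	repoURL        string
	chartLocation  string
	targetRevision string
}

// Nil-safe getters in the style of GetRepoURL/GetChartLocation/
// GetTargetRevision used by the hunk above; defaulting the revision to
// "master" when unset is an assumption for this sketch.
func (c *deploymentConfig) GetRepoURL() string {
	if c == nil {
		return ""
	}
	return c.repoURL
}

func (c *deploymentConfig) GetChartLocation() string {
	if c == nil {
		return ""
	}
	return c.chartLocation
}

func (c *deploymentConfig) GetTargetRevision() string {
	if c == nil || c.targetRevision == "" {
		return "master"
	}
	return c.targetRevision
}

type manifestPushTemplate struct {
	RepoUrl        string
	ChartLocation  string
	TargetRevision string
}

func main() {
	cfg := &deploymentConfig{repoURL: "https://git.example.com/app.git", chartLocation: "charts/app"}
	// Populate the template from config getters, mirroring the shape of
	// buildManifestPushTemplate after the change.
	tpl := manifestPushTemplate{
		RepoUrl:        cfg.GetRepoURL(),
		ChartLocation:  cfg.GetChartLocation(),
		TargetRevision: cfg.GetTargetRevision(),
	}
	fmt.Printf("%+v\n", tpl)
}

Centralising these reads behind getters means every consumer (manifest push, Argo patch, app creation) sees the same defaults instead of each re-deriving them.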
@@ -1176,8 +1180,7 @@ func (impl *TriggerServiceImpl) deployArgoCdApp(ctx context.Context, overrideReq
 	valuesOverrideResponse *app.ValuesOverrideResponse) error {
 	newCtx, span := otel.Tracer("orchestrator").Start(ctx, "TriggerServiceImpl.deployArgoCdApp")
 	defer span.End()
-	impl.logger.Debugw("new pipeline found", "pipeline", valuesOverrideResponse.Pipeline)
-	name, err := impl.createArgoApplicationIfRequired(newCtx, overrideRequest.AppId, valuesOverrideResponse.EnvOverride, valuesOverrideResponse.Pipeline, overrideRequest.UserId)
+	name, err := impl.createArgoApplicationIfRequired(newCtx, valuesOverrideResponse.EnvOverride, valuesOverrideResponse.Pipeline, valuesOverrideResponse.DeploymentConfig, overrideRequest.UserId)
 	if err != nil {
 		impl.logger.Errorw("acd application create error on cd trigger", "err", err, "req", overrideRequest)
 		return err
@@ -1188,23 +1191,26 @@ func (impl *TriggerServiceImpl) deployArgoCdApp(ctx context.Context, overrideReq
 		impl.logger.Errorw("error in updating argocd app ", "err", err)
 		return err
 	}
-	syncTime := time.Now()
-	err = impl.argoClientWrapperService.SyncArgoCDApplicationIfNeededAndRefresh(newCtx, valuesOverrideResponse.Pipeline.DeploymentAppName)
-	if err != nil {
-		impl.logger.Errorw("error in getting argo application with normal refresh", "argoAppName", valuesOverrideResponse.Pipeline.DeploymentAppName)
-		return fmt.Errorf("%s. err: %s", bean.ARGOCD_SYNC_ERROR, util.GetClientErrorDetailedMessage(err))
-	}
-	if impl.ACDConfig.IsManualSyncEnabled() {
-		timeline := &pipelineConfig.PipelineStatusTimeline{
-			CdWorkflowRunnerId: overrideRequest.WfrId,
-			StatusTime:         syncTime,
-			Status:             timelineStatus.TIMELINE_STATUS_ARGOCD_SYNC_COMPLETED,
-			StatusDetail:       timelineStatus.TIMELINE_DESCRIPTION_ARGOCD_SYNC_COMPLETED,
-		}
-		timeline.CreateAuditLog(overrideRequest.UserId)
-		_, err = impl.pipelineStatusTimelineService.SaveTimelineIfNotAlreadyPresent(timeline, nil)
+	if valuesOverrideResponse.DeploymentConfig.IsArgoAppSyncAndRefreshSupported() {
+		syncTime := time.Now()
+		targetRevision := valuesOverrideResponse.DeploymentConfig.GetTargetRevision()
+		err = impl.argoClientWrapperService.SyncArgoCDApplicationIfNeededAndRefresh(newCtx, valuesOverrideResponse.Pipeline.DeploymentAppName, targetRevision)
 		if err != nil {
-			impl.logger.Errorw("error in saving pipeline status timeline", "err", err)
+			impl.logger.Errorw("error in getting argo application with normal refresh", "argoAppName", valuesOverrideResponse.Pipeline.DeploymentAppName)
+			return fmt.Errorf("%s. err: %s", bean.ARGOCD_SYNC_ERROR, util.GetClientErrorDetailedMessage(err))
+		}
+		if impl.ACDConfig.IsManualSyncEnabled() {
+			timeline := &pipelineConfig.PipelineStatusTimeline{
+				CdWorkflowRunnerId: overrideRequest.WfrId,
+				StatusTime:         syncTime,
+				Status:             timelineStatus.TIMELINE_STATUS_ARGOCD_SYNC_COMPLETED,
+				StatusDetail:       timelineStatus.TIMELINE_DESCRIPTION_ARGOCD_SYNC_COMPLETED,
+			}
+			timeline.CreateAuditLog(overrideRequest.UserId)
+			_, err = impl.pipelineStatusTimelineService.SaveTimelineIfNotAlreadyPresent(timeline, nil)
+			if err != nil {
+				impl.logger.Errorw("error in saving pipeline status timeline", "err", err)
+			}
 		}
 	}
 	if updateAppInArgoCd {
@@ -1217,8 +1223,12 @@ func (impl *TriggerServiceImpl) deployArgoCdApp(ctx context.Context, overrideReq
 
 // update repoUrl, revision and argo app sync mode (auto/manual) if needed
 func (impl *TriggerServiceImpl) updateArgoPipeline(ctx context.Context, pipeline *pipelineConfig.Pipeline, envOverride *bean10.EnvConfigOverride, deploymentConfig *bean9.DeploymentConfig) (bool, error) {
+	if !deploymentConfig.IsArgoAppPatchSupported() {
+		impl.logger.Infow("argo app patch not supported", "pipelineId", pipeline.Id, "pipelineName", pipeline.Name)
+		return false, nil
+	}
 	if ctx == nil {
-		impl.logger.Errorw("err in syncing ACD, ctx is NULL", "pipelineName", pipeline.Name)
+		impl.logger.Errorw("err in syncing ACD, ctx is NULL", "pipelineId", pipeline.Id, "pipelineName", pipeline.Name)
 		return false, nil
 	}
 	newCtx, span := otel.Tracer("orchestrator").Start(ctx, "TriggerServiceImpl.updateArgoPipeline")
@@ -1234,15 +1244,15 @@ func (impl *TriggerServiceImpl) updateArgoPipeline(ctx context.Context, pipeline
 	appStatus, _ := status2.FromError(err)
 	if appStatus.Code() == codes.OK {
 		impl.logger.Debugw("argo app exists", "app", argoAppName, "pipeline", pipeline.Name)
-		if impl.argoClientWrapperService.IsArgoAppPatchRequired(argoApplication.Spec.Source, deploymentConfig.RepoURL, envOverride.Chart.ChartLocation) {
+		if impl.argoClientWrapperService.IsArgoAppPatchRequired(argoApplication.Spec.Source, deploymentConfig.GetRepoURL(), deploymentConfig.GetTargetRevision(), deploymentConfig.GetChartLocation()) {
 			patchRequestDto := &bean7.ArgoCdAppPatchReqDto{
 				ArgoAppName:    argoAppName,
-				ChartLocation:  envOverride.Chart.ChartLocation,
-				GitRepoUrl:     deploymentConfig.RepoURL,
-				TargetRevision: bean7.TargetRevisionMaster,
+				ChartLocation:  deploymentConfig.GetChartLocation(),
+				GitRepoUrl:     deploymentConfig.GetRepoURL(),
+				TargetRevision: deploymentConfig.GetTargetRevision(),
 				PatchType:      bean7.PatchTypeMerge,
 			}
-			url, err := impl.gitOperationService.GetRepoUrlWithUserName(deploymentConfig.RepoURL)
+			url, err := impl.gitOperationService.GetRepoUrlWithUserName(deploymentConfig.GetRepoURL())
 			if err != nil {
 				return false, err
 			}
@@ -1252,9 +1262,12 @@ func (impl *TriggerServiceImpl) updateArgoPipeline(ctx context.Context, pipeline
 				impl.logger.Errorw("error in patching argo pipeline", "err", err, "req", patchRequestDto)
 				return false, err
 			}
-			if deploymentConfig.RepoURL != argoApplication.Spec.Source.RepoURL {
+			if deploymentConfig.GetRepoURL() != argoApplication.Spec.Source.RepoURL {
 				impl.logger.Infow("patching argo application's repo url", "argoAppName", argoAppName)
 			}
+			if deploymentConfig.GetTargetRevision() != argoApplication.Spec.Source.TargetRevision {
+				impl.logger.Infow("patching argo application's revision", "argoAppName", argoAppName)
+			}
 			impl.logger.Debugw("pipeline update req", "res", patchRequestDto)
 		} else {
 			impl.logger.Debug("pipeline no need to update ")
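updateArgoPipeline now short-circuits when the config says an Argo app patch is not supported, and IsArgoAppPatchRequired gains a target-revision argument, so drift in the repo URL, chart path, or revision each trigger a merge patch. A toy version of that desired-versus-live comparison is sketched below; appSource is a stand-in for the real v1alpha1.ApplicationSource, and the function name is hypothetical.

package main

import "fmt"

// appSource models only the subset of an Argo CD application source that
// the patch decision compares; the real type is v1alpha1.ApplicationSource.
type appSource struct {
	RepoURL        string
	Path           string
	TargetRevision string
}

// isPatchRequired sketches the desired-vs-live check: a patch is needed
// whenever any of the three tracked fields has drifted.
func isPatchRequired(live appSource, repoURL, targetRevision, chartLocation string) bool {
	return live.RepoURL != repoURL ||
		live.Path != chartLocation ||
		live.TargetRevision != targetRevision
}

func main() {
	live := appSource{
		RepoURL:        "https://git.example.com/app.git",
		Path:           "charts/app",
		TargetRevision: "master",
	}
	// Revision drifted from "master" to "main", so a patch is required.
	fmt.Println(isPatchRequired(live, "https://git.example.com/app.git", "main", "charts/app"))
}

Comparing all three fields in one place keeps the patch idempotent: identical desired and live sources produce no Argo CD API call at all.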
@@ -1274,23 +1287,19 @@ func (impl *TriggerServiceImpl) updateArgoPipeline(ctx context.Context, pipeline
 	}
 }
 
-func (impl *TriggerServiceImpl) createArgoApplicationIfRequired(ctx context.Context, appId int, envConfigOverride *bean10.EnvConfigOverride, pipeline *pipelineConfig.Pipeline, userId int32) (string, error) {
+func (impl *TriggerServiceImpl) createArgoApplicationIfRequired(ctx context.Context, envConfigOverride *bean10.EnvConfigOverride,
+	pipeline *pipelineConfig.Pipeline, deploymentConfig *bean9.DeploymentConfig, userId int32) (string, error) {
 	newCtx, span := otel.Tracer("orchestrator").Start(ctx, "TriggerServiceImpl.createArgoApplicationIfRequired")
 	defer span.End()
-	// repo has been registered while helm create
-	chart, err := impl.chartRepository.FindLatestChartForAppByAppId(appId)
-	if err != nil {
-		impl.logger.Errorw("no chart found ", "app", appId)
-		return "", err
-	}
 	envModel, err := impl.envRepository.FindById(envConfigOverride.TargetEnvironment)
 	if err != nil {
 		return "", err
 	}
 	argoAppName := pipeline.DeploymentAppName
-	if pipeline.DeploymentAppCreated {
+	if !deploymentConfig.IsArgoAppCreationRequired(pipeline.DeploymentAppCreated) {
 		return argoAppName, nil
 	} else {
+		impl.logger.Debugw("new pipeline found", "pipeline", pipeline)
 		// create
 		appNamespace := envConfigOverride.Namespace
 		if appNamespace == "" {
@@ -1305,15 +1314,15 @@ func (impl *TriggerServiceImpl) createArgoApplicationIfRequired(ctx context.Cont
 			TargetServer:    envModel.Cluster.ServerUrl,
 			Project:         "default",
 			ValuesFile:      helper.GetValuesFileForEnv(envModel.Id),
-			RepoPath:        chart.ChartLocation,
-			RepoUrl:         chart.GitRepoUrl,
+			RepoPath:        deploymentConfig.GetChartLocation(),
+			RepoUrl:         deploymentConfig.GetRepoURL(),
 			AutoSyncEnabled: impl.ACDConfig.ArgoCDAutoSyncEnabled,
 		}
 		appRequest.RepoUrl, err = impl.gitOperationService.GetRepoUrlWithUserName(appRequest.RepoUrl)
 		if err != nil {
 			return "", err
 		}
-		argoAppName, err := impl.argoK8sClient.CreateAcdApp(newCtx, appRequest, argocdServer.ARGOCD_APPLICATION_TEMPLATE)
+		createdArgoAppName, err := impl.argoK8sClient.CreateAcdApp(newCtx, appRequest, argocdServer.ARGOCD_APPLICATION_TEMPLATE)
 		if err != nil {
 			return "", err
 		}
@@ -1323,7 +1332,7 @@ func (impl *TriggerServiceImpl) createArgoApplicationIfRequired(ctx context.Cont
 			impl.logger.Errorw("error in update cd pipeline for deployment app created or not", "err", err)
 			return "", err
 		}
-		return argoAppName, nil
+		return createdArgoAppName, nil
 	}
 }
 
@@ -1517,7 +1526,7 @@ func (impl *TriggerServiceImpl) handleCustomGitOpsRepoValidation(runner *pipelin
 	//	impl.logger.Errorw("error in fetching latest chart for app by appId", "err", err, "appId", pipeline.AppId)
 	//	return err
 	//}
-	if gitOps.IsGitOpsRepoNotConfigured(envDeploymentConfig.RepoURL) {
+	if gitOps.IsGitOpsRepoNotConfigured(envDeploymentConfig.GetRepoURL()) {
 		if err = impl.cdWorkflowCommonService.MarkCurrentDeploymentFailed(runner, errors.New(cdWorkflow.GITOPS_REPO_NOT_CONFIGURED), triggeredBy); err != nil {
 			impl.logger.Errorw("error while updating current runner status to failed, TriggerDeployment", "wfrId", runner.Id, "err", err)
 		}
diff --git a/pkg/deployment/trigger/devtronApps/helper/helper.go b/pkg/deployment/trigger/devtronApps/helper/helper.go
index 64cdc49a93..4fa14195cf 100644
--- a/pkg/deployment/trigger/devtronApps/helper/helper.go
+++ b/pkg/deployment/trigger/devtronApps/helper/helper.go
@@ -24,7 +24,7 @@ import (
 )
 
 func GetValuesFileForEnv(environmentId int) string {
-	return fmt.Sprintf("_%d-values.yaml", environmentId) //-{envId}-values.yaml
+	return fmt.Sprintf("_%d-values.yaml", environmentId) //_{envId}-values.yaml
 }
 
 func NewTriggerEvent(deploymentAppType string, triggeredAt time.Time, deployedBy int32) bean.TriggerEvent {
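For reference, the helper change above only corrects the trailing comment; the format string was already underscore-prefixed. A quick check of what GetValuesFileForEnv emits:

package main

import "fmt"

// GetValuesFileForEnv reproduces the helper as it stands after the hunk:
// the file name is "_{envId}-values.yaml", which the fixed comment now matches.
func GetValuesFileForEnv(environmentId int) string {
	return fmt.Sprintf("_%d-values.yaml", environmentId)
}

func main() {
	fmt.Println(GetValuesFileForEnv(4)) // prints: _4-values.yaml
}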
diff --git a/pkg/eventProcessor/bean/deployedApplicationEventBean.go b/pkg/eventProcessor/bean/deployedApplicationEventBean.go
index 4fd1f523d4..99ba6dcf27 100644
--- a/pkg/eventProcessor/bean/deployedApplicationEventBean.go
+++ b/pkg/eventProcessor/bean/deployedApplicationEventBean.go
@@ -23,6 +23,7 @@ import (
 
 type ApplicationDetail struct {
 	Application *v1alpha12.Application `json:"application"`
+	ClusterId   int                    `json:"clusterId"`
 	StatusTime  time.Time              `json:"statusTime"`
 }
 
diff --git a/pkg/eventProcessor/in/DeployedApplicationEventProcessorService.go b/pkg/eventProcessor/in/DeployedApplicationEventProcessorService.go
index 0e7d549c91..849c0ac1ab 100644
--- a/pkg/eventProcessor/in/DeployedApplicationEventProcessorService.go
+++ b/pkg/eventProcessor/in/DeployedApplicationEventProcessorService.go
@@ -21,7 +21,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	v1alpha12 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
 	pubsub "github.com/devtron-labs/common-lib/pubsub-lib"
 	"github.com/devtron-labs/common-lib/pubsub-lib/model"
 	"github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig"
@@ -31,6 +31,7 @@ import (
 	"github.com/devtron-labs/devtron/pkg/appStore/installedApp/service"
 	"github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/FullMode"
 	"github.com/devtron-labs/devtron/pkg/bean"
+	"github.com/devtron-labs/devtron/pkg/deployment/common"
 	"github.com/devtron-labs/devtron/pkg/deployment/gitOps/config"
 	bean2 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean"
 	bean3 "github.com/devtron-labs/devtron/pkg/eventProcessor/bean"
@@ -55,6 +56,7 @@ type DeployedApplicationEventProcessorImpl struct {
 	appStoreDeploymentService service.AppStoreDeploymentService
 	pipelineRepository        pipelineConfig.PipelineRepository // TODO: should use cdPipelineReadService instead
 	installedAppReadService   installedAppReader.InstalledAppReadService
+	DeploymentConfigService   common.DeploymentConfigService
 }
 
 func NewDeployedApplicationEventProcessorImpl(logger *zap.SugaredLogger,
@@ -67,7 +69,8 @@ func NewDeployedApplicationEventProcessorImpl(logger *zap.SugaredLogger,
 	pipelineBuilder pipeline.PipelineBuilder,
 	appStoreDeploymentService service.AppStoreDeploymentService,
 	pipelineRepository pipelineConfig.PipelineRepository,
-	installedAppReadService installedAppReader.InstalledAppReadService) *DeployedApplicationEventProcessorImpl {
+	installedAppReadService installedAppReader.InstalledAppReadService,
+	DeploymentConfigService common.DeploymentConfigService) *DeployedApplicationEventProcessorImpl {
 	deployedApplicationEventProcessorImpl := &DeployedApplicationEventProcessorImpl{
 		logger:       logger,
 		pubSubClient: pubSubClient,
@@ -80,6 +83,7 @@ func NewDeployedApplicationEventProcessorImpl(logger *zap.SugaredLogger,
 		appStoreDeploymentService: appStoreDeploymentService,
 		pipelineRepository:        pipelineRepository,
 		installedAppReadService:   installedAppReadService,
+		DeploymentConfigService:   DeploymentConfigService,
 	}
 	return deployedApplicationEventProcessorImpl
 }
@@ -100,8 +104,12 @@ func (impl *DeployedApplicationEventProcessorImpl) SubscribeArgoAppUpdate() erro
 			applicationDetail.StatusTime = time.Now()
 		}
 		isAppStoreApplication := false
-		_, err = impl.pipelineRepository.GetArgoPipelineByArgoAppName(app.ObjectMeta.Name)
-		if err != nil && err == pg.ErrNoRows {
+		pipelines, err := impl.pipelineRepository.GetArgoPipelineByArgoAppName(app.ObjectMeta.Name)
+		if err != nil {
+			impl.logger.Errorw("error in fetching pipeline from Pipeline Repository", "err", err, "appName", app.ObjectMeta.Name)
+			return
+		}
+		if len(pipelines) == 0 {
 			impl.logger.Infow("this app not found in pipeline table looking in installed_apps table", "appName", app.ObjectMeta.Name)
 			// if not found in pipeline table then search in installed_apps table
 			installedAppModel, err := impl.installedAppReadService.GetInstalledAppByGitOpsAppName(app.ObjectMeta.Name)
@@ -122,7 +130,7 @@ func (impl *DeployedApplicationEventProcessorImpl) SubscribeArgoAppUpdate() erro
 				return
 			}
 		}
-		isSucceeded, _, pipelineOverride, err := impl.appService.UpdateDeploymentStatusForGitOpsPipelines(app, applicationDetail.StatusTime, isAppStoreApplication)
+		isSucceeded, _, pipelineOverride, err := impl.appService.UpdateDeploymentStatusForGitOpsPipelines(app, applicationDetail.ClusterId, applicationDetail.StatusTime, isAppStoreApplication)
 		if err != nil {
 			impl.logger.Errorw("error on application status update", "err", err, "msg", string(msg.Data))
 			// TODO - check update for charts - fix this call
@@ -182,7 +190,7 @@ func (impl *DeployedApplicationEventProcessorImpl) SubscribeArgoAppDeleteStatus(
 		}
 		impl.logger.Infow("argo delete event received", "appName", app.Name, "namespace", app.Namespace, "deleteTimestamp", app.DeletionTimestamp)
 
-		err = impl.updateArgoAppDeleteStatus(app)
+		err = impl.updateArgoAppDeleteStatus(applicationDetail)
 		if err != nil {
 			impl.logger.Errorw("error in updating pipeline delete status", "err", err, "appName", app.Name)
 		}
@@ -206,69 +214,90 @@ func (impl *DeployedApplicationEventProcessorImpl) SubscribeArgoAppDeleteStatus(
 	return nil
 }
 
-func (impl *DeployedApplicationEventProcessorImpl) updateArgoAppDeleteStatus(app *v1alpha12.Application) error {
-	pipeline, err := impl.pipelineRepository.GetArgoPipelineByArgoAppName(app.ObjectMeta.Name)
-	if err != nil && err != pg.ErrNoRows {
-		impl.logger.Errorw("error in fetching pipeline from Pipeline Repository", "err", err)
+func (impl *DeployedApplicationEventProcessorImpl) updateHelmAppArgoAppDeleteStatus(application *v1alpha1.Application) error {
+	// Helm app deployed using argocd
+	var gitHash string
+	if application.Operation != nil && application.Operation.Sync != nil {
+		gitHash = application.Operation.Sync.Revision
+	} else if application.Status.OperationState != nil && application.Status.OperationState.Operation.Sync != nil {
+		gitHash = application.Status.OperationState.Operation.Sync.Revision
+	}
+	installedAppDeleteReq, err := impl.installedAppReadService.GetInstalledAppByGitHash(gitHash)
+	if err != nil {
+		impl.logger.Errorw("error in fetching installed app by git hash from installed app repository", "err", err)
 		return err
 	}
-	if pipeline.Deleted == true {
-		impl.logger.Errorw("invalid nats message, pipeline already deleted")
-		return errors.New("invalid nats message, pipeline already deleted")
+
+	// Check to ensure that delete request for app was received
+	installedApp, err := impl.installedAppService.GetInstalledAppById(installedAppDeleteReq.InstalledAppId)
+	if err != nil && !errors.Is(err, pg.ErrNoRows) {
+		impl.logger.Errorw("error in fetching app from installed app repository", "err", err, "installedAppId", installedAppDeleteReq.InstalledAppId)
+		return err
+	} else if errors.Is(err, pg.ErrNoRows) {
+		impl.logger.Errorw("App not found in database", "installedAppId", installedAppDeleteReq.InstalledAppId, "err", err)
+		return fmt.Errorf("app not found in database %s", err)
 	}
-	if err == pg.ErrNoRows {
-		// Helm app deployed using argocd
-		var gitHash string
-		if app.Operation != nil && app.Operation.Sync != nil {
-			gitHash = app.Operation.Sync.Revision
-		} else if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
-			gitHash = app.Status.OperationState.Operation.Sync.Revision
-		}
-		installedAppDeleteReq, err := impl.installedAppReadService.GetInstalledAppByGitHash(gitHash)
-		if err != nil {
-			impl.logger.Errorw("error in fetching installed app by git hash from installed app repository", "err", err)
-			return err
-		}
-		// Check to ensure that delete request for app was received
-		installedApp, err := impl.installedAppService.GetInstalledAppById(installedAppDeleteReq.InstalledAppId)
-		if err == pg.ErrNoRows {
-			impl.logger.Errorw("App not found in database", "installedAppId", installedAppDeleteReq.InstalledAppId, "err", err)
-			return fmt.Errorf("app not found in database %s", err)
-		} else if installedApp.DeploymentAppDeleteRequest == false {
-			// TODO 4465 remove app from log after final RCA
-			impl.logger.Infow("Deployment delete not requested for app, not deleting app from DB", "appName", app.Name, "app", app)
-			return nil
-		}
+	if installedApp.DeploymentAppDeleteRequest == false {
+		// TODO 4465 remove app from log after final RCA
+		impl.logger.Infow("Deployment delete not requested for app, not deleting app from DB", "appName", application.Name, "installedApp", installedApp)
+		return nil
+	}
 
-		deleteRequest := &appStoreBean.InstallAppVersionDTO{}
-		deleteRequest.ForceDelete = false
-		deleteRequest.NonCascadeDelete = false
-		deleteRequest.AcdPartialDelete = false
-		deleteRequest.InstalledAppId = installedAppDeleteReq.InstalledAppId
-		deleteRequest.AppId = installedAppDeleteReq.AppId
-		deleteRequest.AppName = installedAppDeleteReq.AppName
-		deleteRequest.Namespace = installedAppDeleteReq.Namespace
-		deleteRequest.ClusterId = installedAppDeleteReq.ClusterId
-		deleteRequest.EnvironmentId = installedAppDeleteReq.EnvironmentId
-		deleteRequest.AppOfferingMode = installedAppDeleteReq.AppOfferingMode
-		deleteRequest.UserId = 1
-		_, err = impl.appStoreDeploymentService.DeleteInstalledApp(context.Background(), deleteRequest)
-		if err != nil {
-			impl.logger.Errorw("error in deleting installed app", "err", err)
-			return err
-		}
-	} else {
-		// devtron app
-		if pipeline.DeploymentAppDeleteRequest == false {
-			impl.logger.Infow("Deployment delete not requested for app, not deleting app from DB", "appName", app.Name, "app", app)
-			return nil
-		}
-		_, err = impl.pipelineBuilder.DeleteCdPipeline(&pipeline, context.Background(), bean.FORCE_DELETE, false, 1)
-		if err != nil {
-			impl.logger.Errorw("error in deleting cd pipeline", "err", err)
-			return err
-		}
+	deleteRequest := &appStoreBean.InstallAppVersionDTO{}
+	deleteRequest.ForceDelete = false
+	deleteRequest.NonCascadeDelete = false
+	deleteRequest.AcdPartialDelete = false
+	deleteRequest.InstalledAppId = installedAppDeleteReq.InstalledAppId
+	deleteRequest.AppId = installedAppDeleteReq.AppId
+	deleteRequest.AppName = installedAppDeleteReq.AppName
+	deleteRequest.Namespace = installedAppDeleteReq.Namespace
+	deleteRequest.ClusterId = installedAppDeleteReq.ClusterId
+	deleteRequest.EnvironmentId = installedAppDeleteReq.EnvironmentId
+	deleteRequest.AppOfferingMode = installedAppDeleteReq.AppOfferingMode
+	deleteRequest.UserId = 1
+	_, err = impl.appStoreDeploymentService.DeleteInstalledApp(context.Background(), deleteRequest)
+	if err != nil {
+		impl.logger.Errorw("error in deleting installed app", "err", err)
+		return err
 	}
 	return nil
 }
+
+func (impl *DeployedApplicationEventProcessorImpl) updateDevtronAppArgoAppDeleteStatus(applicationDetail bean3.ApplicationDetail,
+	pipelines []pipelineConfig.Pipeline) error {
+	application := applicationDetail.Application
+	pipelineModel, err := impl.DeploymentConfigService.FilterPipelinesByApplicationClusterIdAndNamespace(pipelines, applicationDetail.ClusterId, application.Namespace)
+	if err != nil {
+		impl.logger.Errorw("error in filtering pipeline by application cluster id and namespace", "err", err)
+		return err
+	}
+	if pipelineModel.Deleted == true {
+		impl.logger.Errorw("invalid nats message, pipeline already deleted")
+		return errors.New("invalid nats message, pipeline already deleted")
+	}
+	// devtron app
+	if pipelineModel.DeploymentAppDeleteRequest == false {
+		impl.logger.Infow("Deployment delete not requested for app, not deleting app from DB", "appName", application.Name, "app", application)
+		return nil
+	}
+	_, err = impl.pipelineBuilder.DeleteCdPipeline(&pipelineModel, context.Background(), bean.FORCE_DELETE, false, 1)
+	if err != nil {
+		impl.logger.Errorw("error in deleting cd pipeline", "err", err)
+		return err
+	}
+	return nil
+}
+
+func (impl *DeployedApplicationEventProcessorImpl) updateArgoAppDeleteStatus(applicationDetail bean3.ApplicationDetail) error {
+	application := applicationDetail.Application
+	pipelines, err := impl.pipelineRepository.GetArgoPipelineByArgoAppName(application.ObjectMeta.Name)
+	if err != nil {
+		impl.logger.Errorw("error in fetching pipeline from Pipeline Repository", "err", err)
+		return err
+	}
+	if len(pipelines) == 0 {
+		return impl.updateHelmAppArgoAppDeleteStatus(application)
+	}
+	return impl.updateDevtronAppArgoAppDeleteStatus(applicationDetail, pipelines)
+}
diff --git a/pkg/generateManifest/DeploymentTemplateService.go b/pkg/generateManifest/DeploymentTemplateService.go
index 5db8d4acd6..eaab4bc77d 100644
--- a/pkg/generateManifest/DeploymentTemplateService.go
+++ b/pkg/generateManifest/DeploymentTemplateService.go
@@ -21,7 +21,6 @@ import (
 	"fmt"
 	"github.com/caarlos0/env"
 	"github.com/devtron-labs/common-lib/utils/k8s"
-	"github.com/devtron-labs/devtron/api/helm-app/bean"
 	"github.com/devtron-labs/devtron/api/helm-app/gRPC"
 	read2 "github.com/devtron-labs/devtron/api/helm-app/service/read"
 	openapi2 "github.com/devtron-labs/devtron/api/openapi/openapiClient"
@@ -33,7 +32,9 @@ import (
 	"github.com/devtron-labs/devtron/pkg/app"
 	"github.com/devtron-labs/devtron/pkg/chart"
 	chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository"
+	clusterBean "github.com/devtron-labs/devtron/pkg/cluster/bean"
 	repository3 "github.com/devtron-labs/devtron/pkg/cluster/environment/repository"
+	read3 "github.com/devtron-labs/devtron/pkg/deployment/common/read"
 	"github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef"
 	"github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/read"
 	bean2 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean"
@@ -67,6 +68,7 @@ type DeploymentTemplateService interface {
 	GetDeploymentTemplateWithResolvedData(ctx context.Context, request DeploymentTemplateRequest) (DeploymentTemplateResponse, error)
 	ResolveTemplateVariables(ctx context.Context, values string, request DeploymentTemplateRequest) (string, map[string]string, error)
 }
+
 type DeploymentTemplateServiceImpl struct {
 	Logger       *zap.SugaredLogger
 	chartService chart.ChartService
@@ -87,6 +89,7 @@ type DeploymentTemplateServiceImpl struct {
 	restartWorkloadConfig                *RestartWorkloadConfig
 	mergeUtil                            *util.MergeUtil
 	deploymentTemplateHistoryReadService read.DeploymentTemplateHistoryReadService
+	deploymentConfigReadService          read3.DeploymentConfigReadService
 }
 
 func GetRestartWorkloadConfig() (*RestartWorkloadConfig, error) {
@@ -112,6 +115,7 @@ func NewDeploymentTemplateServiceImpl(Logger *zap.SugaredLogger, chartService ch
 	pipelineRepository pipelineConfig.PipelineRepository,
 	mergeUtil *util.MergeUtil,
 	deploymentTemplateHistoryReadService read.DeploymentTemplateHistoryReadService,
+	deploymentConfigReadService read3.DeploymentConfigReadService,
 ) (*DeploymentTemplateServiceImpl, error) {
 	deploymentTemplateServiceImpl := &DeploymentTemplateServiceImpl{
 		Logger:       Logger,
@@ -132,6 +136,7 @@ func NewDeploymentTemplateServiceImpl(Logger *zap.SugaredLogger, chartService ch
 		pipelineRepository:                   pipelineRepository,
 		mergeUtil:                            mergeUtil,
 		deploymentTemplateHistoryReadService: deploymentTemplateHistoryReadService,
+		deploymentConfigReadService:          deploymentConfigReadService,
 	}
 	cfg, err := GetRestartWorkloadConfig()
 	if err != nil {
@@ -319,6 +324,7 @@ func (impl DeploymentTemplateServiceImpl) setRequestMetadata(request *Deployment
 			// not returning the error as this will break the UX
 		}
 		request.PipelineName = cdPipeline.Name
+		request.DeploymentAppName = cdPipeline.DeploymentAppName
 	}
 	return *request
 
@@ -434,7 +440,14 @@ func (impl DeploymentTemplateServiceImpl) GenerateManifest(ctx context.Context,
 		Name:    request.AppName,
 		Version: refChart.Version,
 	}
-
+	deploymentConfigMin, err := impl.deploymentConfigReadService.GetDeploymentConfigMinForAppAndEnv(request.AppId, request.EnvId)
+	if err != nil {
+		impl.Logger.Errorw("error in getting deployment config", "appId", request.AppId, "envId", request.EnvId, "err", err)
+		return nil, err
+	}
+	if deploymentConfigMin.IsLinkedRelease() {
+		chartMetaData.Name = refChart.Name
+	}
 	refChartPath, err := impl.chartRefService.GetChartLocation(refChart.Location, refChart.ChartData)
 	if err != nil {
 		impl.Logger.Errorw("error in getting chart location", "chartMetaData", chartMetaData, "refChartLocation", refChart.Location)
@@ -467,6 +480,10 @@ func (impl DeploymentTemplateServiceImpl) GenerateManifest(ctx context.Context,
 		sanitizedK8sVersion = k8s2.StripPrereleaseFromK8sVersion(sanitizedK8sVersion)
 	}
 
+	releaseName := util2.BuildDeployedAppName(request.AppName, request.EnvName)
+	if len(request.DeploymentAppName) != 0 {
+		releaseName = request.DeploymentAppName
+	}
 	mergedValuesYaml := impl.patchReleaseAttributes(request, valuesYaml)
 	installReleaseRequest := &gRPC.InstallReleaseRequest{
 		AppName:         request.AppName,
@@ -476,14 +493,14 @@ func (impl DeploymentTemplateServiceImpl) GenerateManifest(ctx context.Context,
 		K8SVersion:      sanitizedK8sVersion,
 		ChartRepository: ChartRepository,
 		ReleaseIdentifier: &gRPC.ReleaseIdentifier{
-			ReleaseName:      fmt.Sprintf("%s-%s", request.AppName, request.EnvName),
+			ReleaseName:      releaseName,
 			ReleaseNamespace: 
request.Namespace, }, ChartContent: &gRPC.ChartContent{ Content: chartInBytes, }, } - config, err := impl.helmAppReadService.GetClusterConf(bean.DEFAULT_CLUSTER_ID) + config, err := impl.helmAppReadService.GetClusterConf(clusterBean.DefaultClusterId) if err != nil { impl.Logger.Errorw("error in fetching cluster detail", "clusterId", 1, "err", err) return nil, err diff --git a/pkg/generateManifest/DeploymentTemplateService_test.go b/pkg/generateManifest/DeploymentTemplateService_test.go index 08af2bd343..caa93504a9 100644 --- a/pkg/generateManifest/DeploymentTemplateService_test.go +++ b/pkg/generateManifest/DeploymentTemplateService_test.go @@ -29,6 +29,7 @@ import ( mocks6 "github.com/devtron-labs/devtron/internal/util/mocks" mocks2 "github.com/devtron-labs/devtron/pkg/app/mocks" "github.com/devtron-labs/devtron/pkg/chart" + "github.com/devtron-labs/devtron/pkg/chart/bean" "github.com/devtron-labs/devtron/pkg/chart/mocks" chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" mocks5 "github.com/devtron-labs/devtron/pkg/chartRepo/repository/mocks" @@ -453,7 +454,7 @@ func TestDeploymentTemplateServiceImpl_GetManifest(t *testing.T) { impl, chartService, _, _, _, _, _, _ := InitEventSimpleFactoryImpl(t) valuesYaml := "{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6961,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizationPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":
{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secret\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" ctx := context.Background() - request := chart.TemplateRequest{ChartRefId: 1} + request := bean.TemplateRequest{ChartRefId: 1} wantErr := errors.New("error in getting refChart") chartService.On("GetRefChart", request).Return(refChart, template, wantErr, version, myString) _, gotErr := impl.GetManifest(ctx, 1, valuesYaml) @@ -464,7 +465,7 @@ func TestDeploymentTemplateServiceImpl_GetManifest(t *testing.T) { impl, chartService, _, _, _, chartTemplateServiceImpl, helmAppService, helmAppClient := InitEventSimpleFactoryImpl(t) valuesYaml := 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6962,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\
\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizationPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secret\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" ctx := context.Background() - request := chart.TemplateRequest{ChartRefId: 2} + request := bean.TemplateRequest{ChartRefId: 2} var config *client.ClusterConfig templateChartResponse := &client.TemplateChartResponse{ GeneratedManifest: "test generated manifest", @@ -483,7 +484,7 @@ func TestDeploymentTemplateServiceImpl_GetManifest(t *testing.T) { impl, chartService, _, _, _, 
chartTemplateServiceImpl, helmAppService, _ := InitEventSimpleFactoryImpl(t) valuesYaml := "{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6963,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 
600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizationPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":
{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secret\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" ctx := context.Background() - request := chart.TemplateRequest{ChartRefId: 5} + request := bean.TemplateRequest{ChartRefId: 5} wantErr1 := errors.New("error in fetching cluster detail") var zipPath string chartService.On("GetRefChart", request).Return("refChart5", "template5", nil, "version5", "myString5") @@ -498,7 +499,7 @@ func TestDeploymentTemplateServiceImpl_GetManifest(t *testing.T) { impl, chartService, _, _, _, chartTemplateServiceImpl, helmAppService, helmAppClient := InitEventSimpleFactoryImpl(t) valuesYaml := 
"{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6963,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.internal\\\",\\\"pathType\
\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizationPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secret\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" ctx := context.Background() - request := chart.TemplateRequest{ChartRefId: 5} + request := bean.TemplateRequest{ChartRefId: 5} var config *client.ClusterConfig wantErr1 := errors.New("error in templating chart") var zipPath string @@ -515,7 +516,7 @@ func TestDeploymentTemplateServiceImpl_GetManifest(t *testing.T) { impl, chartService, _, _, _, chartTemplateServiceImpl, _, _ := 
InitEventSimpleFactoryImpl(t) valuesYaml := "{\\\"ContainerPort\\\":[{\\\"envoyPort\\\":6964,\\\"idleTimeout\\\":\\\"6969s\\\",\\\"name\\\":\\\"app\\\",\\\"port\\\":6969,\\\"servicePort\\\":69,\\\"supportStreaming\\\":false,\\\"useHTTP2\\\":false}],\\\"EnvVariables\\\":[],\\\"GracePeriod\\\":30,\\\"LivenessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"MaxSurge\\\":1,\\\"MaxUnavailable\\\":0,\\\"MinReadySeconds\\\":60,\\\"ReadinessProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"scheme\\\":\\\"\\\",\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"Spec\\\":{\\\"Affinity\\\":{\\\"Key\\\":null,\\\"Values\\\":\\\"nodes\\\",\\\"key\\\":\\\"\\\"}},\\\"StartupProbe\\\":{\\\"Path\\\":\\\"\\\",\\\"command\\\":[],\\\"failureThreshold\\\":3,\\\"httpHeaders\\\":[],\\\"initialDelaySeconds\\\":20,\\\"periodSeconds\\\":10,\\\"port\\\":6969,\\\"successThreshold\\\":1,\\\"tcp\\\":false,\\\"timeoutSeconds\\\":5},\\\"ambassadorMapping\\\":{\\\"ambassadorId\\\":\\\"\\\",\\\"cors\\\":{},\\\"enabled\\\":false,\\\"hostname\\\":\\\"devtron.example.com\\\",\\\"labels\\\":{},\\\"prefix\\\":\\\"/\\\",\\\"retryPolicy\\\":{},\\\"rewrite\\\":\\\"\\\",\\\"tls\\\":{\\\"context\\\":\\\"\\\",\\\"create\\\":false,\\\"hosts\\\":[],\\\"secretName\\\":\\\"\\\"}},\\\"args\\\":{\\\"enabled\\\":false,\\\"value\\\":[\\\"/bin/sh\\\",\\\"-c\\\",\\\"touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600\\\"]},\\\"autoscaling\\\":{\\\"MaxReplicas\\\":2,\\\"MinReplicas\\\":1,\\\"TargetCPUUtilizationPercentage\\\":90,\\\"TargetMemoryUtilizationPercentage\\\":69,\\\"annotations\\\":{},\\\"behavior\\\":{},\\\"enabled\\\":false,\\\"extraMetrics\\\":[],\\\"labels\\\":{}},\\\"command\\\":{\\\"enabled\\\":false,\\\"value\\\":[],\\\"workingDir\\\":{}},\\\"containerSecurityContext\\\":{},\\\"containerSpec\\\":{\\\"lifecycle\\\":{\\\"enabled\\\":false,\\\"postStart\\\":{\\\"httpGet\\\":{\\\"host\\\":\\\"example.com\\\",\\\"path\\\":\\\"/example\\\",\\\"port\\\":90}},\\\"preStop\\\":{\\\"exec\\\":{\\\"command\\\":[\\\"sleep\\\",\\\"10\\\"]}}}},\\\"containers\\\":[],\\\"dbMigrationConfig\\\":{\\\"enabled\\\":false},\\\"envoyproxy\\\":{\\\"configMapName\\\":\\\"\\\",\\\"image\\\":\\\"docker.io/envoyproxy/envoy:v1.16.0\\\",\\\"lifecycle\\\":{},\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"50m\\\",\\\"memory\\\":\\\"50Mi\\\"}}},\\\"hostAliases\\\":[],\\\"image\\\":{\\\"pullPolicy\\\":\\\"IfNotPresent\\\"},\\\"imagePullSecrets\\\":[],\\\"ingress\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\\\"chart-example2.local\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"labels\\\":{},\\\"tls\\\":[]},\\\"ingressInternal\\\":{\\\"annotations\\\":{},\\\"className\\\":\\\"\\\",\\\"enabled\\\":false,\\\"hosts\\\":[{\\\"host\\\":\\\"chart-example1.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example1\\\"]},{\\\"host\\\":\
\\"chart-example2.internal\\\",\\\"pathType\\\":\\\"ImplementationSpecific\\\",\\\"paths\\\":[\\\"/example2\\\",\\\"/example2/healthz\\\"]}],\\\"tls\\\":[]},\\\"initContainers\\\":[],\\\"istio\\\":{\\\"authorizationPolicy\\\":{\\\"action\\\":null,\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"provider\\\":{},\\\"rules\\\":[]},\\\"destinationRule\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"subsets\\\":[],\\\"trafficPolicy\\\":{}},\\\"enable\\\":false,\\\"gateway\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"host\\\":\\\"example.com\\\",\\\"labels\\\":{},\\\"tls\\\":{\\\"enabled\\\":false,\\\"secretName\\\":\\\"secret-name\\\"}},\\\"peerAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"labels\\\":{},\\\"mtls\\\":{\\\"mode\\\":null},\\\"portLevelMtls\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"requestAuthentication\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"jwtRules\\\":[],\\\"labels\\\":{},\\\"selector\\\":{\\\"enabled\\\":false}},\\\"virtualService\\\":{\\\"annotations\\\":{},\\\"enabled\\\":false,\\\"gateways\\\":[],\\\"hosts\\\":[],\\\"http\\\":[],\\\"labels\\\":{}}},\\\"kedaAutoscaling\\\":{\\\"advanced\\\":{},\\\"authenticationRef\\\":{},\\\"enabled\\\":false,\\\"envSourceContainerName\\\":\\\"\\\",\\\"maxReplicaCount\\\":2,\\\"minReplicaCount\\\":1,\\\"triggerAuthentication\\\":{\\\"enabled\\\":false,\\\"name\\\":\\\"\\\",\\\"spec\\\":{}},\\\"triggers\\\":[]},\\\"networkPolicy\\\":{\\\"annotations\\\":{},\\\"egress\\\":[],\\\"enabled\\\":false,\\\"ingress\\\":[],\\\"labels\\\":{},\\\"podSelector\\\":{\\\"matchExpressions\\\":[],\\\"matchLabels\\\":{}},\\\"policyTypes\\\":[]},\\\"pauseForSecondsBeforeSwitchActive\\\":30,\\\"podAnnotations\\\":{},\\\"podDisruptionBudget\\\":{},\\\"podLabels\\\":{},\\\"podSecurityContext\\\":{},\\\"prometheus\\\":{\\\"release\\\":\\\"monitoring\\\"},\\\"rawYaml\\\":[],\\\"replicaCount\\\":1,\\\"resources\\\":{\\\"limits\\\":{\\\"cpu\\\":\\\"0.05\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"requests\\\":{\\\"cpu\\\":\\\"0.01\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartPolicy\\\":\\\"Always\\\",\\\"rolloutAnnotations\\\":{},\\\"rolloutLabels\\\":{},\\\"secret\\\":{\\\"data\\\":{},\\\"enabled\\\":false},\\\"server\\\":{\\\"deployment\\\":{\\\"image\\\":\\\"\\\",\\\"image_tag\\\":\\\"1-95af053\\\"}},\\\"service\\\":{\\\"annotations\\\":{},\\\"loadBalancerSourceRanges\\\":[],\\\"type\\\":\\\"ClusterIP\\\"},\\\"serviceAccount\\\":{\\\"annotations\\\":{},\\\"create\\\":false,\\\"name\\\":\\\"\\\"},\\\"servicemonitor\\\":{\\\"additionalLabels\\\":{}},\\\"tolerations\\\":[],\\\"topologySpreadConstraints\\\":[],\\\"volumeMounts\\\":[],\\\"volumes\\\":[],\\\"waitForSecondsBeforeScalingDown\\\":30,\\\"winterSoldier\\\":{\\\"action\\\":\\\"sleep\\\",\\\"annotation\\\":{},\\\"apiVersion\\\":\\\"pincher.devtron.ai/v1alpha1\\\",\\\"enabled\\\":false,\\\"fieldSelector\\\":[\\\"AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now())\\\"],\\\"labels\\\":{},\\\"targetReplicas\\\":[],\\\"timeRangesWithZone\\\":{\\\"timeRanges\\\":[],\\\"timeZone\\\":\\\"Asia/Kolkata\\\"},\\\"type\\\":\\\"Rollout\\\"}}" ctx := context.Background() - request := chart.TemplateRequest{ChartRefId: 3} + request := bean.TemplateRequest{ChartRefId: 3} wantErr := errors.New("error in getting chart") var zipPath string chartService.On("GetRefChart", request).Return("refChart1", "template1", nil, "version1", "myString1") diff --git a/pkg/generateManifest/bean.go 
b/pkg/generateManifest/bean.go index 26a465dd3f..bb9b0d397b 100644 --- a/pkg/generateManifest/bean.go +++ b/pkg/generateManifest/bean.go @@ -45,6 +45,7 @@ type DeploymentTemplateRequest struct { EnvName string `json:"-"` Namespace string `json:"-"` PipelineName string `json:"-"` + DeploymentAppName string `json:"-"` ChartRefId int `json:"chartRefId"` RequestDataMode RequestDataMode `json:"valuesAndManifestFlag"` Values string `json:"values"` diff --git a/pkg/generateManifest/helper.go b/pkg/generateManifest/helper.go index 8706267d6f..6f53c12a90 100644 --- a/pkg/generateManifest/helper.go +++ b/pkg/generateManifest/helper.go @@ -20,10 +20,10 @@ import ( "context" "fmt" "github.com/devtron-labs/common-lib/utils/yaml" - "github.com/devtron-labs/devtron/api/helm-app/bean" "github.com/devtron-labs/devtron/api/helm-app/gRPC" "github.com/devtron-labs/devtron/internal/sql/repository/app" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + clusterBean "github.com/devtron-labs/devtron/pkg/cluster/bean" "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" "go.opentelemetry.io/otel" "golang.org/x/exp/maps" @@ -100,7 +100,7 @@ func (impl DeploymentTemplateServiceImpl) constructInstallReleaseBulkReq(apps [] impl.Logger.Errorw("exception caught in getting k8sServerVersion", "err", err) return nil, err } - config, err := impl.helmAppReadService.GetClusterConf(bean.DEFAULT_CLUSTER_ID) + config, err := impl.helmAppReadService.GetClusterConf(clusterBean.DefaultClusterId) if err != nil { impl.Logger.Errorw("error in fetching cluster detail", "clusterId", 1, "err", err) return nil, err diff --git a/pkg/gitops/GitOpsConfigService.go b/pkg/gitops/GitOpsConfigService.go index 0b7bb6c2f5..a0776f75b3 100644 --- a/pkg/gitops/GitOpsConfigService.go +++ b/pkg/gitops/GitOpsConfigService.go @@ -19,8 +19,10 @@ package gitops import ( "context" "encoding/json" + "errors" "fmt" certificate2 "github.com/argoproj/argo-cd/v2/pkg/apiclient/certificate" + cluster3 "github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster" repocreds2 "github.com/argoproj/argo-cd/v2/pkg/apiclient/repocreds" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" util4 "github.com/devtron-labs/common-lib/utils/k8s" @@ -36,12 +38,15 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/gitOps/git" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/validation" gitOpsBean "github.com/devtron-labs/devtron/pkg/gitops/bean" + moduleBean "github.com/devtron-labs/devtron/pkg/module/bean" + moduleRead "github.com/devtron-labs/devtron/pkg/module/read" + moduleReadBean "github.com/devtron-labs/devtron/pkg/module/read/bean" + moduleErr "github.com/devtron-labs/devtron/pkg/module/read/error" util2 "github.com/devtron-labs/devtron/util" "net/http" "strings" "time" - cluster3 "github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster" "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/cluster" @@ -50,14 +55,14 @@ import ( util3 "github.com/devtron-labs/devtron/pkg/util" "github.com/go-pg/pg" 
"go.uber.org/zap" - "k8s.io/apimachinery/pkg/api/errors" + k8sError "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/yaml" ) type GitOpsConfigService interface { ValidateAndCreateGitOpsConfig(config *apiBean.GitOpsConfigDto) (apiBean.DetailedErrorGitOpsConfigResponse, error) ValidateAndUpdateGitOpsConfig(config *apiBean.GitOpsConfigDto) (apiBean.DetailedErrorGitOpsConfigResponse, error) - GitOpsValidateDryRun(config *apiBean.GitOpsConfigDto) apiBean.DetailedErrorGitOpsConfigResponse + GitOpsValidateDryRun(isArgoModuleInstalled bool, config *apiBean.GitOpsConfigDto) apiBean.DetailedErrorGitOpsConfigResponse GetGitOpsConfigById(id int) (*apiBean.GitOpsConfigDto, error) GetAllGitOpsConfig() ([]*apiBean.GitOpsConfigDto, error) GetGitOpsConfigByProvider(provider string) (*apiBean.GitOpsConfigDto, error) @@ -79,6 +84,7 @@ type GitOpsConfigServiceImpl struct { argoCDConfigGetter config2.ArgoCDConfigGetter argoClientWrapperService argocdServer.ArgoClientWrapperService clusterReadService read.ClusterReadService + moduleReadService moduleRead.ModuleReadService } func NewGitOpsConfigServiceImpl(Logger *zap.SugaredLogger, @@ -94,7 +100,8 @@ func NewGitOpsConfigServiceImpl(Logger *zap.SugaredLogger, argoCDConnectionManager connection.ArgoCDConnectionManager, argoCDConfigGetter config2.ArgoCDConfigGetter, argoClientWrapperService argocdServer.ArgoClientWrapperService, - clusterReadService read.ClusterReadService) *GitOpsConfigServiceImpl { + clusterReadService read.ClusterReadService, + moduleReadService moduleRead.ModuleReadService) *GitOpsConfigServiceImpl { return &GitOpsConfigServiceImpl{ logger: Logger, gitOpsRepository: gitOpsRepository, @@ -111,19 +118,22 @@ func NewGitOpsConfigServiceImpl(Logger *zap.SugaredLogger, argoCDConfigGetter: argoCDConfigGetter, argoClientWrapperService: argoClientWrapperService, clusterReadService: clusterReadService, + moduleReadService: moduleReadService, } } func (impl *GitOpsConfigServiceImpl) ValidateAndCreateGitOpsConfig(config *apiBean.GitOpsConfigDto) (apiBean.DetailedErrorGitOpsConfigResponse, error) { - detailedErrorGitOpsConfigResponse := impl.GitOpsValidateDryRun(config) + argoModule, err := impl.moduleReadService.GetModuleInfoByName(moduleBean.ModuleNameArgoCd) + if err != nil && !errors.Is(err, moduleErr.ModuleNotFoundError) { + impl.logger.Errorw("error in getting argo module", "error", err) + return apiBean.DetailedErrorGitOpsConfigResponse{}, err + } + detailedErrorGitOpsConfigResponse := impl.GitOpsValidateDryRun(argoModule.IsInstalled(), config) if len(detailedErrorGitOpsConfigResponse.StageErrorMap) == 0 { - //create argo-cd user, if not created, here argo-cd integration has to be installed - gRPCConfig, err := impl.argoCDConfigGetter.GetGRPCConfig() + err = impl.updateArgoCdUserDetailIfNotPresent(argoModule) if err != nil { - impl.logger.Errorw("error in getting all grpc configs", "error", err) return detailedErrorGitOpsConfigResponse, err } - _ = impl.argoCDConnectionManager.GetOrUpdateArgoCdUserDetail(gRPCConfig) _, err = impl.createGitOpsConfig(context.Background(), config) if err != nil { impl.logger.Errorw("service err, SaveGitRepoConfig", "err", err, "payload", config) @@ -133,6 +143,19 @@ func (impl *GitOpsConfigServiceImpl) ValidateAndCreateGitOpsConfig(config *apiBe return detailedErrorGitOpsConfigResponse, nil } +func (impl *GitOpsConfigServiceImpl) updateArgoCdUserDetailIfNotPresent(argoModule *moduleReadBean.ModuleInfoMin) error { + // create argo-cd user, if not created, here argo-cd integration has to be installed + if 
argoModule.IsInstalled() { + gRPCConfig, err := impl.argoCDConfigGetter.GetGRPCConfig() + if err != nil { + impl.logger.Errorw("error in getting all grpc configs", "error", err) + return err + } + _ = impl.argoCDConnectionManager.GetOrUpdateArgoCdUserDetail(gRPCConfig) + } + return nil +} + func (impl *GitOpsConfigServiceImpl) ValidateAndUpdateGitOpsConfig(config *apiBean.GitOpsConfigDto) (apiBean.DetailedErrorGitOpsConfigResponse, error) { isTokenEmpty := config.Token == "" isTlsDetailsEmpty := config.EnableTLSVerification && @@ -176,16 +199,18 @@ func (impl *GitOpsConfigServiceImpl) ValidateAndUpdateGitOpsConfig(config *apiBe } } } - gRPCConfig, err := impl.argoCDConfigGetter.GetGRPCConfig() - if err != nil { - impl.logger.Errorw("error in getting all grpc configs", "error", err) + argoModule, err := impl.moduleReadService.GetModuleInfoByName(moduleBean.ModuleNameArgoCd) + if err != nil && !errors.Is(err, moduleErr.ModuleNotFoundError) { + impl.logger.Errorw("error in getting argo module", "error", err) return apiBean.DetailedErrorGitOpsConfigResponse{}, err } - _ = impl.argoCDConnectionManager.GetOrUpdateArgoCdUserDetail(gRPCConfig) - - detailedErrorGitOpsConfigResponse := impl.GitOpsValidateDryRun(config) + detailedErrorGitOpsConfigResponse := impl.GitOpsValidateDryRun(argoModule.IsInstalled(), config) if len(detailedErrorGitOpsConfigResponse.StageErrorMap) == 0 { - err := impl.updateGitOpsConfig(config) + err = impl.updateArgoCdUserDetailIfNotPresent(argoModule) + if err != nil { + return detailedErrorGitOpsConfigResponse, err + } + err = impl.updateGitOpsConfig(config) if err != nil { impl.logger.Errorw("service err, updateGitOpsConfig", "err", err, "payload", config) return detailedErrorGitOpsConfigResponse, err @@ -194,11 +219,151 @@ func (impl *GitOpsConfigServiceImpl) ValidateAndUpdateGitOpsConfig(config *apiBe return detailedErrorGitOpsConfigResponse, nil } +// step-1: add ca cert if present to list of trusted certificates on argoCD using certificate.ServiceClient service +// step-2: add repository credentials in the secret named by util.ACDAuthConfig.GitOpsSecretName +// step-3: add repository URL in argocd-cm; argocd-cm will have a reference to the secret created in step-2 for credentials +// step-4: upsert cluster in acd +func (impl *GitOpsConfigServiceImpl) registerGitOpsClientConfig(ctx context.Context, model *repository.GitOpsConfig, request *apiBean.GitOpsConfigDto) (*apiBean.GitOpsConfigDto, error) { + if model.EnableTLSVerification { + err := impl.gitOperationService.UpdateGitHostUrlByProvider(request) + if err != nil { + return nil, err + } + _, err = impl.argoClientWrapperService.CreateRepoCreds(ctx, &repocreds2.RepoCredsCreateRequest{ + Creds: &v1alpha1.RepoCreds{ + URL: request.Host, + Username: model.Username, + Password: model.Token, + TLSClientCertData: model.TlsCert, + TLSClientCertKey: model.TlsKey, + }, + Upsert: true, + }) + if err != nil { + impl.logger.Errorw("error in saving repo credential template to argocd", "err", err) + return nil, err + } + + err = impl.addCACertInArgoIfPresent(ctx, model) + if err != nil { + impl.logger.Errorw("error in adding ca cert to argo", "err", err) + return nil, err + } + } else { + + clusterBean, err := impl.clusterReadService.FindOne(bean2.DEFAULT_CLUSTER) + if err != nil { + return nil, err + } + cfg := clusterBean.GetClusterConfig() + + client, err := impl.K8sUtil.GetCoreV1Client(cfg) + if err != nil { + return nil, err + } + secret, err := impl.K8sUtil.GetSecret(impl.aCDAuthConfig.ACDConfigMapNamespace,
impl.aCDAuthConfig.GitOpsSecretName, client) + statusError, _ := err.(*k8sError.StatusError) + if err != nil && statusError.Status().Code != http.StatusNotFound { + impl.logger.Errorw("secret not found", "err", err) + return nil, err + } + data := make(map[string][]byte) + data[gitOpsBean.USERNAME] = []byte(request.Username) + data[gitOpsBean.PASSWORD] = []byte(request.Token) + + if secret == nil { + secret, err = impl.K8sUtil.CreateSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, data, impl.aCDAuthConfig.GitOpsSecretName, "", client, nil, nil) + if err != nil { + impl.logger.Errorw("err on creating secret", "err", err) + return nil, err + } + } else { + secret.Data = data + secret, err = impl.K8sUtil.UpdateSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, secret, client) + if err != nil { + operationComplete := false + retryCount := 0 + for !operationComplete && retryCount < 3 { + retryCount = retryCount + 1 + secret, err := impl.K8sUtil.GetSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, impl.aCDAuthConfig.GitOpsSecretName, client) + if err != nil { + impl.logger.Errorw("secret not found", "err", err) + return nil, err + } + secret.Data = data + secret, err = impl.K8sUtil.UpdateSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, secret, client) + if err != nil { + continue + } else { + operationComplete = true + } + } + } + } + err = impl.gitOperationService.UpdateGitHostUrlByProvider(request) + if err != nil { + return nil, err + } + operationComplete := false + retryCount := 0 + for !operationComplete && retryCount < 3 { + retryCount = retryCount + 1 + + cm, err := impl.K8sUtil.GetConfigMap(impl.aCDAuthConfig.ACDConfigMapNamespace, impl.aCDAuthConfig.ACDConfigMapName, client) + if err != nil { + return nil, err + } + currentHost := request.Host + updatedData := impl.updateData(cm.Data, request, impl.aCDAuthConfig.GitOpsSecretName, currentHost) + data := cm.Data + if data == nil { + data = make(map[string]string, 0) + } + data["repository.credentials"] = updatedData["repository.credentials"] + cm.Data = data + _, err = impl.K8sUtil.UpdateConfigMap(impl.aCDAuthConfig.ACDConfigMapNamespace, cm, client) + if err != nil { + continue + } else { + operationComplete = true + } + } + if !operationComplete { + return nil, fmt.Errorf("resource version not matched with config map attempted 3 times") + } + } + if err := impl.registerClustersInArgoCd(ctx); err != nil { + return nil, err + } + return request, nil +} + +func (impl *GitOpsConfigServiceImpl) registerClustersInArgoCd(ctx context.Context) error { + // if git-ops config is created/saved successfully (just before transaction commit) and this was first git-ops config, then upsert clusters in acd + gitOpsConfigurationStatus, err := impl.gitOpsConfigReadService.IsGitOpsConfigured() + if err != nil { + return err + } + if !gitOpsConfigurationStatus.IsGitOpsConfiguredAndArgoCdInstalled() { + clusters, err := impl.clusterService.FindAllActive() + if err != nil { + impl.logger.Errorw("Error while fetching all the clusters", "err", err) + return err + } + for _, clusterBean := range clusters { + cl := impl.clusterService.ConvertClusterBeanObjectToCluster(&clusterBean) + _, err = impl.argoClientWrapperService.CreateCluster(ctx, &cluster3.ClusterCreateRequest{Upsert: true, Cluster: cl}) + if err != nil { + impl.logger.Errorw("Error while upserting cluster in acd", "clusterName", clusterBean.ClusterName, "err", err) + return err + } + } + } + return nil +} + // step-1: save data in DB -// step-3: add ca cert if present to list of trusted certificates on
argoCD using certificate.ServiceClient service -// step-3: add repository credentials in secret declared using env variable GITOPS_SECRET_NAME -// step-4 add repository URL in argocd-cm, argocd-cm will have reference to secret created in step-3 for credentials -// steps-5 upsert cluster in acd +// step-2: register GitOps config in ArgoCd func (impl *GitOpsConfigServiceImpl) createGitOpsConfig(ctx context.Context, request *apiBean.GitOpsConfigDto) (*apiBean.GitOpsConfigDto, error) { impl.logger.Debugw("gitops create request", "req", request) dbConnection := impl.gitOpsRepository.GetConnection() @@ -293,13 +458,65 @@ func (impl *GitOpsConfigServiceImpl) createGitOpsConfig(ctx context.Context, req return nil, err } - if model.EnableTLSVerification { - - err = impl.gitOperationService.UpdateGitHostUrlByProvider(request) + argoModule, err := impl.moduleReadService.GetModuleInfoByName(moduleBean.ModuleNameArgoCd) + if err != nil && !errors.Is(err, moduleErr.ModuleNotFoundError) { + impl.logger.Errorw("error in getting argo module", "error", err) + return nil, err + } + if argoModule.IsInstalled() { + request, err = impl.registerGitOpsClientConfig(ctx, model, request) if err != nil { + impl.logger.Errorw("error in registering gitops config", "err", err) return nil, err } - _, err = impl.argoClientWrapperService.CreateRepoCreds(ctx, &repocreds2.RepoCredsCreateRequest{ + } + + // now commit transaction + err = tx.Commit() + if err != nil { + return nil, err + } + + err = impl.gitOperationService.ReloadGitOpsProvider() + if err != nil { + return nil, err + } + request.Id = model.Id + return request, nil +} + +func (impl *GitOpsConfigServiceImpl) addCACertInArgoIfPresent(ctx context.Context, model *repository.GitOpsConfig) error { + if len(model.CaCert) > 0 { + host, _, err := util2.GetHost(model.Host) + if err != nil { + impl.logger.Errorw("invalid gitOps host", "host", host, "err", err) + return err + } + _, err = impl.argoClientWrapperService.CreateCertificate(ctx, &certificate2.RepositoryCertificateCreateRequest{ + Certificates: &v1alpha1.RepositoryCertificateList{ + Items: []v1alpha1.RepositoryCertificate{{ + ServerName: host, + CertData: []byte(model.CaCert), + CertType: "https", + }}, + }, + Upsert: true, + }) + if err != nil { + return err + } + } + return nil +} + +func (impl *GitOpsConfigServiceImpl) patchGitOpsClientConfig(model *repository.GitOpsConfig, request *apiBean.GitOpsConfigDto) error { + if model.EnableTLSVerification { + err := impl.gitOperationService.UpdateGitHostUrlByProvider(request) + if err != nil { + return err + } + + _, err = impl.argoClientWrapperService.CreateRepoCreds(context.Background(), &repocreds2.RepoCredsCreateRequest{ Creds: &v1alpha1.RepoCreds{ URL: request.Host, Username: model.Username, @@ -311,32 +528,32 @@ func (impl *GitOpsConfigServiceImpl) createGitOpsConfig(ctx context.Context, req }) if err != nil { impl.logger.Errorw("error in saving repo credential template to argocd", "err", err) - return nil, err + return err } - err = impl.addCACertInArgoIfPresent(ctx, model) + err = impl.addCACertInArgoIfPresent(context.Background(), model) if err != nil { impl.logger.Errorw("error in adding ca cert to argo", "err", err) - return nil, err + return err } } else { - clusterBean, err := impl.clusterReadService.FindOne(bean2.DEFAULT_CLUSTER) if err != nil { - return nil, err + return err } cfg := clusterBean.GetClusterConfig() client, err := impl.K8sUtil.GetCoreV1Client(cfg) if err != nil { - return nil, err + return err } + secret, err := 
impl.K8sUtil.GetSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, impl.aCDAuthConfig.GitOpsSecretName, client) - statusError, _ := err.(*errors.StatusError) + statusError, _ := err.(*k8sError.StatusError) if err != nil && statusError.Status().Code != http.StatusNotFound { impl.logger.Errorw("secret not found", "err", err) - return nil, err + return err } data := make(map[string][]byte) data[gitOpsBean.USERNAME] = []byte(request.Username) @@ -346,7 +563,7 @@ func (impl *GitOpsConfigServiceImpl) createGitOpsConfig(ctx context.Context, req secret, err = impl.K8sUtil.CreateSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, data, impl.aCDAuthConfig.GitOpsSecretName, "", client, nil, nil) if err != nil { impl.logger.Errorw("err on creating secret", "err", err) - return nil, err + return err } } else { secret.Data = data @@ -359,22 +576,22 @@ func (impl *GitOpsConfigServiceImpl) createGitOpsConfig(ctx context.Context, req secret, err := impl.K8sUtil.GetSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, impl.aCDAuthConfig.GitOpsSecretName, client) if err != nil { impl.logger.Errorw("secret not found", "err", err) - return nil, err + return err } secret.Data = data secret, err = impl.K8sUtil.UpdateSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, secret, client) if err != nil { continue - } - if err == nil { + } else { operationComplete = true } } + } } err = impl.gitOperationService.UpdateGitHostUrlByProvider(request) if err != nil { - return nil, err + return err } operationComplete := false retryCount := 0 @@ -383,14 +600,11 @@ func (impl *GitOpsConfigServiceImpl) createGitOpsConfig(ctx context.Context, req cm, err := impl.K8sUtil.GetConfigMap(impl.aCDAuthConfig.ACDConfigMapNamespace, impl.aCDAuthConfig.ACDConfigMapName, client) if err != nil { - return nil, err + return err } currentHost := request.Host updatedData := impl.updateData(cm.Data, request, impl.aCDAuthConfig.GitOpsSecretName, currentHost) data := cm.Data - if data == nil { - data = make(map[string]string, 0) - } data["repository.credentials"] = updatedData["repository.credentials"] cm.Data = data _, err = impl.K8sUtil.UpdateConfigMap(impl.aCDAuthConfig.ACDConfigMapNamespace, cm, client) @@ -401,64 +615,7 @@ func (impl *GitOpsConfigServiceImpl) createGitOpsConfig(ctx context.Context, req } } if !operationComplete { - return nil, fmt.Errorf("resouce version not matched with config map attempted 3 times") - } - } - - // if git-ops config is created/saved successfully (just before transaction commit) and this was first git-ops config, then upsert clusters in acd - gitOpsConfigurationStatus, err := impl.gitOpsConfigReadService.IsGitOpsConfigured() - if err != nil { - return nil, err - } - if !gitOpsConfigurationStatus.IsGitOpsConfigured { - clusters, err := impl.clusterService.FindAllActive() - if err != nil { - impl.logger.Errorw("Error while fetching all the clusters", "err", err) - return nil, err - } - for _, cluster := range clusters { - cl := impl.clusterService.ConvertClusterBeanObjectToCluster(&cluster) - _, err = impl.argoClientWrapperService.CreateCluster(ctx, &cluster3.ClusterCreateRequest{Upsert: true, Cluster: cl}) - if err != nil { - impl.logger.Errorw("Error while upserting cluster in acd", "clusterName", cluster.ClusterName, "err", err) - return nil, err - } - } - } - - // now commit transaction - err = tx.Commit() - if err != nil { - return nil, err - } - - err = impl.gitOperationService.ReloadGitOpsProvider() - if err != nil { - return nil, err - } - request.Id = model.Id - return request, nil -} - -func (impl 
*GitOpsConfigServiceImpl) addCACertInArgoIfPresent(ctx context.Context, model *repository.GitOpsConfig) error { - if len(model.CaCert) > 0 { - host, err := util2.GetHost(model.Host) - if err != nil { - impl.logger.Errorw("invalid gitOps host", "host", host, "err", err) - return err - } - _, err = impl.argoClientWrapperService.CreateCertificate(ctx, &certificate2.RepositoryCertificateCreateRequest{ - Certificates: &v1alpha1.RepositoryCertificateList{ - Items: []v1alpha1.RepositoryCertificate{{ - ServerName: host, - CertData: []byte(model.CaCert), - CertType: "https", - }}, - }, - Upsert: true, - }) - if err != nil { - return err + return fmt.Errorf("resource version not matched with config map attempted 3 times") } } return nil @@ -576,123 +733,18 @@ func (impl *GitOpsConfigServiceImpl) updateGitOpsConfig(request *apiBean.GitOpsC return err } request.Id = model.Id - - if model.EnableTLSVerification { - - err = impl.gitOperationService.UpdateGitHostUrlByProvider(request) - if err != nil { - return err - } - - _, err = impl.argoClientWrapperService.CreateRepoCreds(context.Background(), &repocreds2.RepoCredsCreateRequest{ - Creds: &v1alpha1.RepoCreds{ - URL: request.Host, - Username: model.Username, - Password: model.Token, - TLSClientCertData: model.TlsCert, - TLSClientCertKey: model.TlsKey, - }, - Upsert: true, - }) - if err != nil { - impl.logger.Errorw("error in saving repo credential template to argocd", "err", err) - return err - } - - err = impl.addCACertInArgoIfPresent(context.Background(), model) - if err != nil { - impl.logger.Errorw("error in adding ca cert to argo", "err", err) - return err - } - - } else { - clusterBean, err := impl.clusterReadService.FindOne(bean2.DEFAULT_CLUSTER) - if err != nil { - return err - } - cfg := clusterBean.GetClusterConfig() - if err != nil { - return err - } - - client, err := impl.K8sUtil.GetCoreV1Client(cfg) - if err != nil { - return err - } - - secret, err := impl.K8sUtil.GetSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, impl.aCDAuthConfig.GitOpsSecretName, client) - statusError, _ := err.(*errors.StatusError) - if err != nil && statusError.Status().Code != http.StatusNotFound { - impl.logger.Errorw("secret not found", "err", err) - return err - } - data := make(map[string][]byte) - data[gitOpsBean.USERNAME] = []byte(request.Username) - data[gitOpsBean.PASSWORD] = []byte(request.Token) - - if secret == nil { - secret, err = impl.K8sUtil.CreateSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, data, impl.aCDAuthConfig.GitOpsSecretName, "", client, nil, nil) - if err != nil { - impl.logger.Errorw("err on creating secret", "err", err) - return err - } - } else { - secret.Data = data - secret, err = impl.K8sUtil.UpdateSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, secret, client) - if err != nil { - operationComplete := false - retryCount := 0 - for !operationComplete && retryCount < 3 { - retryCount = retryCount + 1 - secret, err := impl.K8sUtil.GetSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, impl.aCDAuthConfig.GitOpsSecretName, client) - if err != nil { - impl.logger.Errorw("secret not found", "err", err) - return err - } - secret.Data = data - secret, err = impl.K8sUtil.UpdateSecret(impl.aCDAuthConfig.ACDConfigMapNamespace, secret, client) - if err != nil { - continue - } - if err == nil { - operationComplete = true - } - } - - } - } - err = impl.gitOperationService.UpdateGitHostUrlByProvider(request) + argoModule, err := impl.moduleReadService.GetModuleInfoByName(moduleBean.ModuleNameArgoCd) + if err != nil && !errors.Is(err,
moduleErr.ModuleNotFoundError) { + impl.logger.Errorw("error in getting argo module", "error", err) + return err + } + if argoModule.IsInstalled() { + err = impl.patchGitOpsClientConfig(model, request) if err != nil { + impl.logger.Errorw("error in registering gitops config", "err", err) return err } - operationComplete := false - retryCount := 0 - for !operationComplete && retryCount < 3 { - retryCount = retryCount + 1 - - cm, err := impl.K8sUtil.GetConfigMap(impl.aCDAuthConfig.ACDConfigMapNamespace, impl.aCDAuthConfig.ACDConfigMapName, client) - if err != nil { - return err - } - currentHost := request.Host - updatedData := impl.updateData(cm.Data, request, impl.aCDAuthConfig.GitOpsSecretName, currentHost) - data := cm.Data - data["repository.credentials"] = updatedData["repository.credentials"] - cm.Data = data - _, err = impl.K8sUtil.UpdateConfigMap(impl.aCDAuthConfig.ACDConfigMapNamespace, cm, client) - if err != nil { - continue - } - if err == nil { - operationComplete = true - } - } - if !operationComplete { - return fmt.Errorf("resouce version not matched with config map attempted 3 times") - } - } - err = tx.Commit() if err != nil { return err @@ -808,7 +860,7 @@ func (impl *GitOpsConfigServiceImpl) GetGitOpsConfigByProvider(provider string) return config, err } -func (impl *GitOpsConfigServiceImpl) GitOpsValidateDryRun(config *apiBean.GitOpsConfigDto) apiBean.DetailedErrorGitOpsConfigResponse { +func (impl *GitOpsConfigServiceImpl) GitOpsValidateDryRun(isArgoModuleInstalled bool, config *apiBean.GitOpsConfigDto) apiBean.DetailedErrorGitOpsConfigResponse { isTokenEmpty := config.Token == "" isTlsDetailsEmpty := config.EnableTLSVerification && (len(config.TLSConfig.CaData) == 0 && len(config.TLSConfig.TLSCertData) == 0 && len(config.TLSConfig.TLSKeyData) == 0) @@ -850,7 +902,7 @@ func (impl *GitOpsConfigServiceImpl) GitOpsValidateDryRun(config *apiBean.GitOps } } - return impl.gitOpsValidationService.GitOpsValidateDryRun(config) + return impl.gitOpsValidationService.GitOpsValidateDryRun(isArgoModuleInstalled, config) } func (impl *GitOpsConfigServiceImpl) updateData(data map[string]string, request *apiBean.GitOpsConfigDto, secretName string, currentHost string) map[string]string { diff --git a/pkg/k8s/application/k8sApplicationService.go b/pkg/k8s/application/k8sApplicationService.go index 3898bfc71f..39588ef15e 100644 --- a/pkg/k8s/application/k8sApplicationService.go +++ b/pkg/k8s/application/k8sApplicationService.go @@ -109,9 +109,8 @@ type K8sApplicationServiceImpl struct { ephemeralContainerService cluster.EphemeralContainerService ephemeralContainerRepository repository.EphemeralContainersRepository ephemeralContainerConfig *EphemeralContainerConfig - //argoApplicationService argoApplication.ArgoApplicationService - fluxApplicationService fluxApplication.FluxApplicationService - clusterReadService read.ClusterReadService + fluxApplicationService fluxApplication.FluxApplicationService + clusterReadService read.ClusterReadService } func NewK8sApplicationServiceImpl(Logger *zap.SugaredLogger, clusterService cluster.ClusterService, pump connector.Pump, helmAppService client.HelmAppService, K8sUtil *k8s2.K8sServiceImpl, aCDAuthConfig *util3.ACDAuthConfig, K8sResourceHistoryService kubernetesResourceAuditLogs.K8sResourceHistoryService, diff --git a/pkg/module/ModuleCacheService.go b/pkg/module/ModuleCacheService.go index 57f45922d6..dfbce782bd 100644 --- a/pkg/module/ModuleCacheService.go +++ b/pkg/module/ModuleCacheService.go @@ -19,6 +19,7 @@ package module import ( "context" 
"github.com/devtron-labs/common-lib/utils/k8s" + "github.com/devtron-labs/devtron/pkg/module/bean" moduleRepo "github.com/devtron-labs/devtron/pkg/module/repo" serverBean "github.com/devtron-labs/devtron/pkg/server/bean" serverEnvConfig "github.com/devtron-labs/devtron/pkg/server/config" @@ -44,14 +45,14 @@ type ModuleCacheServiceImpl struct { logger *zap.SugaredLogger mutex sync.Mutex K8sUtil *k8s.K8sServiceImpl - moduleEnvConfig *ModuleEnvConfig + moduleEnvConfig *bean.ModuleEnvConfig serverEnvConfig *serverEnvConfig.ServerEnvConfig serverDataStore *serverDataStore.ServerDataStore moduleRepository moduleRepo.ModuleRepository teamService team.TeamService } -func NewModuleCacheServiceImpl(logger *zap.SugaredLogger, K8sUtil *k8s.K8sServiceImpl, moduleEnvConfig *ModuleEnvConfig, serverEnvConfig *serverEnvConfig.ServerEnvConfig, +func NewModuleCacheServiceImpl(logger *zap.SugaredLogger, K8sUtil *k8s.K8sServiceImpl, moduleEnvConfig *bean.ModuleEnvConfig, serverEnvConfig *serverEnvConfig.ServerEnvConfig, serverDataStore *serverDataStore.ServerDataStore, moduleRepository moduleRepo.ModuleRepository, teamService team.TeamService) (*ModuleCacheServiceImpl, error) { impl := &ModuleCacheServiceImpl{ logger: logger, @@ -72,7 +73,7 @@ func NewModuleCacheServiceImpl(logger *zap.SugaredLogger, K8sUtil *k8s.K8sServic } if !exists { // insert cicd module entry - err = impl.updateModuleToInstalled(ModuleNameCicd) + err = impl.updateModuleToInstalled(bean.ModuleNameCiCd) if err != nil { return nil, err } @@ -98,7 +99,7 @@ func (impl *ModuleCacheServiceImpl) updateModuleToInstalled(moduleName string) e module := &moduleRepo.Module{ Name: moduleName, Version: impl.serverDataStore.CurrentVersion, - Status: ModuleStatusInstalled, + Status: bean.ModuleStatusInstalled, UpdatedOn: time.Now(), } err := impl.moduleRepository.Save(module) diff --git a/pkg/module/ModuleCronService.go b/pkg/module/ModuleCronService.go index 5cad249d3e..a05fed6793 100644 --- a/pkg/module/ModuleCronService.go +++ b/pkg/module/ModuleCronService.go @@ -23,6 +23,7 @@ import ( "github.com/devtron-labs/devtron/api/helm-app/gRPC" client "github.com/devtron-labs/devtron/api/helm-app/service" "github.com/devtron-labs/devtron/api/helm-app/service/bean" + bean2 "github.com/devtron-labs/devtron/pkg/module/bean" moduleRepo "github.com/devtron-labs/devtron/pkg/module/repo" moduleDataStore "github.com/devtron-labs/devtron/pkg/module/store" serverBean "github.com/devtron-labs/devtron/pkg/server/bean" @@ -43,7 +44,7 @@ type ModuleCronService interface { type ModuleCronServiceImpl struct { logger *zap.SugaredLogger cron *cron.Cron - moduleEnvConfig *ModuleEnvConfig + moduleEnvConfig *bean2.ModuleEnvConfig moduleRepository moduleRepo.ModuleRepository serverEnvConfig *serverEnvConfig.ServerEnvConfig helmAppService client.HelmAppService @@ -52,7 +53,7 @@ type ModuleCronServiceImpl struct { moduleDataStore *moduleDataStore.ModuleDataStore } -func NewModuleCronServiceImpl(logger *zap.SugaredLogger, moduleEnvConfig *ModuleEnvConfig, moduleRepository moduleRepo.ModuleRepository, +func NewModuleCronServiceImpl(logger *zap.SugaredLogger, moduleEnvConfig *bean2.ModuleEnvConfig, moduleRepository moduleRepo.ModuleRepository, serverEnvConfig *serverEnvConfig.ServerEnvConfig, helmAppService client.HelmAppService, moduleServiceHelper ModuleServiceHelper, 
moduleResourceStatusRepository moduleRepo.ModuleResourceStatusRepository, moduleDataStore *moduleDataStore.ModuleDataStore, cronLogger *cron2.CronLoggerImpl) (*ModuleCronServiceImpl, error) { @@ -118,7 +119,7 @@ func (impl *ModuleCronServiceImpl) handleModuleStatus(moduleNameInput string) { // update status timeout if module status is installing for more than 1 hour for _, module := range modules { - if module.Status != ModuleStatusInstalling { + if module.Status != bean2.ModuleStatusInstalling { continue } if len(moduleNameInput) > 0 && module.Name != moduleNameInput { @@ -126,11 +127,11 @@ func (impl *ModuleCronServiceImpl) handleModuleStatus(moduleNameInput string) { } if time.Now().After(module.UpdatedOn.Add(1 * time.Hour)) { // timeout case - impl.updateModuleStatus(module, ModuleStatusTimeout) + impl.updateModuleStatus(module, bean2.ModuleStatusTimeout) } else if !util.IsBaseStack() { // if module is cicd then insert as installed - if module.Name == ModuleNameCicd { - impl.updateModuleStatus(module, ModuleStatusInstalled) + if module.Name == bean2.ModuleNameCiCd { + impl.updateModuleStatus(module, bean2.ModuleStatusInstalled) } else { resourceTreeFilter, err := impl.buildResourceTreeFilter(module.Name) if err != nil { @@ -146,7 +147,7 @@ func (impl *ModuleCronServiceImpl) handleModuleStatus(moduleNameInput string) { impl.logger.Errorw("Error occurred while fetching helm application detail to check if module is installed", "moduleName", module.Name, "err", err) continue } else if appDetail.ApplicationStatus == serverBean.AppHealthStatusHealthy { - impl.updateModuleStatus(module, ModuleStatusInstalled) + impl.updateModuleStatus(module, bean2.ModuleStatusInstalled) } // save module resources status @@ -249,7 +250,7 @@ func (impl *ModuleCronServiceImpl) buildResourceTreeFilter(moduleName string) (* return nil, nil } - resourceFilterIfaceValue := ResourceFilter{} + resourceFilterIfaceValue := bean2.ResourceFilter{} err = json.Unmarshal([]byte(resourceFilterIface), &resourceFilterIfaceValue) if err != nil { impl.logger.Errorw("Error while unmarshalling resourceFilterIface", "resourceFilterIface", resourceFilterIface, "err", err) @@ -290,7 +291,7 @@ func (impl *ModuleCronServiceImpl) buildResourceTreeFilter(moduleName string) (* return resourceTreeFilter, nil } -func (impl *ModuleCronServiceImpl) updateModuleStatus(module moduleRepo.Module, status ModuleStatus) { +func (impl *ModuleCronServiceImpl) updateModuleStatus(module moduleRepo.Module, status bean2.ModuleStatus) { impl.logger.Debugw("updating module status", "name", module.Name, "status", status) module.Status = status module.UpdatedOn = time.Now() diff --git a/pkg/module/ModuleService.go b/pkg/module/ModuleService.go index e988bbd583..e750dc2f60 100644 --- a/pkg/module/ModuleService.go +++ b/pkg/module/ModuleService.go @@ -23,6 +23,7 @@ import ( "github.com/devtron-labs/devtron/api/helm-app/gRPC" client "github.com/devtron-labs/devtron/api/helm-app/service" clientErrors "github.com/devtron-labs/devtron/pkg/errors" + "github.com/devtron-labs/devtron/pkg/module/bean" moduleRepo "github.com/devtron-labs/devtron/pkg/module/repo" moduleUtil "github.com/devtron-labs/devtron/pkg/module/util" "github.com/devtron-labs/devtron/pkg/policyGovernance/security/scanTool" @@ -40,11 +41,11 @@ import ( ) type ModuleService interface { - GetModuleInfo(name string) (*ModuleInfoDto, error) - GetModuleConfig(name string) 
(*ModuleConfigDto, error) - HandleModuleAction(userId int32, moduleName string, moduleActionRequest *ModuleActionRequestDto) (*ActionResponse, error) - GetAllModuleInfo() ([]ModuleInfoDto, error) - EnableModule(moduleName, version string) (*ActionResponse, error) + GetModuleInfo(name string) (*bean.ModuleInfoDto, error) + GetModuleConfig(name string) (*bean.ModuleConfigDto, error) + HandleModuleAction(userId int32, moduleName string, moduleActionRequest *bean.ModuleActionRequestDto) (*bean.ActionResponse, error) + GetAllModuleInfo() ([]bean.ModuleInfoDto, error) + EnableModule(moduleName, version string) (*bean.ActionResponse, error) } type ModuleServiceImpl struct { @@ -84,10 +85,10 @@ func NewModuleServiceImpl(logger *zap.SugaredLogger, serverEnvConfig *serverEnvC } } -func (impl ModuleServiceImpl) GetModuleInfo(name string) (*ModuleInfoDto, error) { +func (impl ModuleServiceImpl) GetModuleInfo(name string) (*bean.ModuleInfoDto, error) { impl.logger.Debugw("getting module info", "name", name) - moduleInfoDto := &ModuleInfoDto{ + moduleInfoDto := &bean.ModuleInfoDto{ Name: name, } @@ -100,9 +101,9 @@ func (impl ModuleServiceImpl) GetModuleInfo(name string) (*ModuleInfoDto, error) impl.logger.Errorw("error in handling module not found status ", "name", name, "err", err) } if flagForMarkingActiveTool { - toolVersion := TRIVY_V1 - if name == ModuleNameSecurityClair { - toolVersion = CLAIR_V4 + toolVersion := bean.TRIVY_V1 + if name == bean.ModuleNameSecurityClair { + toolVersion = bean.CLAIR_V4 } _, err = impl.EnableModule(name, toolVersion) if err != nil { @@ -120,7 +121,7 @@ func (impl ModuleServiceImpl) GetModuleInfo(name string) (*ModuleInfoDto, error) // now this is the case when data found in DB // if module is in installing state, then trigger module status check and override module model - if module.Status == ModuleStatusInstalling { + if module.Status == bean.ModuleStatusInstalling { impl.moduleCronService.HandleModuleStatusIfNotInProgress(module.Name) // override module model module, err = impl.moduleRepository.FindOne(name) @@ -131,7 +132,7 @@ func (impl ModuleServiceImpl) GetModuleInfo(name string) (*ModuleInfoDto, error) } // Handling for previous Modules flagForEnablingState := false - if module.ModuleType != MODULE_TYPE_SECURITY && module.Status == ModuleStatusInstalled { + if module.ModuleType != bean.MODULE_TYPE_SECURITY && module.Status == bean.ModuleStatusInstalled { flagForEnablingState = true err = impl.moduleRepository.MarkModuleAsEnabled(name) if err != nil { @@ -152,9 +153,9 @@ func (impl ModuleServiceImpl) GetModuleInfo(name string) (*ModuleInfoDto, error) return nil, err } if moduleResourcesStatusFromDb != nil { - var moduleResourcesStatus []*ModuleResourceStatusDto + var moduleResourcesStatus []*bean.ModuleResourceStatusDto for _, moduleResourceStatusFromDb := range moduleResourcesStatusFromDb { - moduleResourcesStatus = append(moduleResourcesStatus, &ModuleResourceStatusDto{ + moduleResourcesStatus = append(moduleResourcesStatus, &bean.ModuleResourceStatusDto{ Group: moduleResourceStatusFromDb.Group, Version: moduleResourceStatusFromDb.Version, Kind: moduleResourceStatusFromDb.Kind, @@ -169,17 +170,17 @@ func (impl ModuleServiceImpl) GetModuleInfo(name string) (*ModuleInfoDto, error) return moduleInfoDto, nil } -func (impl ModuleServiceImpl) GetModuleConfig(name string) (*ModuleConfigDto, error) { - moduleConfig := &ModuleConfigDto{} - if name == BlobStorage { - blobStorageConfig := &BlobStorageConfig{} +func (impl ModuleServiceImpl) GetModuleConfig(name string) 
(*bean.ModuleConfigDto, error) { + moduleConfig := &bean.ModuleConfigDto{} + if name == bean.BlobStorage { + blobStorageConfig := &bean.BlobStorageConfig{} env.Parse(blobStorageConfig) moduleConfig.Enabled = blobStorageConfig.Enabled } return moduleConfig, nil } -func (impl ModuleServiceImpl) handleModuleNotFoundStatus(moduleName string) (ModuleStatus, string, bool, error) { +func (impl ModuleServiceImpl) handleModuleNotFoundStatus(moduleName string) (bean.ModuleStatus, string, bool, error) { // if entry is not found in database, then check if that module is legacy or not // if enterprise user -> if legacy -> then mark as installed in db and return as installed, if not legacy -> return as not installed // if non-enterprise user-> fetch helm release enable Key. if true -> then mark as installed in db and return as installed. if false -> @@ -189,7 +190,7 @@ func (impl ModuleServiceImpl) handleModuleNotFoundStatus(moduleName string) (Mod moduleMetaData, err := impl.moduleServiceHelper.GetModuleMetadata(moduleName) if err != nil { impl.logger.Errorw("Error in getting module metadata", "moduleName", moduleName, "err", err) - return ModuleStatusNotInstalled, "", false, err + return bean.ModuleStatusNotInstalled, "", false, err } moduleMetaDataStr := string(moduleMetaData) isLegacyModule := gjson.Get(moduleMetaDataStr, "result.isIncludedInLegacyFullPackage").Bool() @@ -197,15 +198,15 @@ func (impl ModuleServiceImpl) handleModuleNotFoundStatus(moduleName string) (Mod flagForEnablingState := false flagForActiveTool := false - if moduleType == MODULE_TYPE_SECURITY { - err = impl.moduleRepository.FindByModuleTypeAndStatus(moduleType, ModuleStatusInstalled) + if moduleType == bean.MODULE_TYPE_SECURITY { + err = impl.moduleRepository.FindByModuleTypeAndStatus(moduleType, bean.ModuleStatusInstalled) if err != nil { if err == pg.ErrNoRows { flagForEnablingState = true flagForActiveTool = true } else { impl.logger.Errorw("error in getting module by type", "moduleName", moduleName, "err", err) - return ModuleStatusNotInstalled, moduleType, false, err + return bean.ModuleStatusNotInstalled, moduleType, false, err } } } else { @@ -218,7 +219,7 @@ func (impl ModuleServiceImpl) handleModuleNotFoundStatus(moduleName string) (Mod status, err := impl.saveModuleAsInstalled(moduleName, moduleType, flagForEnablingState) return status, moduleType, flagForActiveTool, err } - return ModuleStatusNotInstalled, moduleType, false, nil + return bean.ModuleStatusNotInstalled, moduleType, false, nil } // for non-enterprise user devtronHelmAppIdentifier := impl.helmAppService.GetDevtronHelmAppIdentifier() @@ -229,12 +230,12 @@ func (impl ModuleServiceImpl) handleModuleNotFoundStatus(moduleName string) (Mod if apiError != nil { err = apiError } - return ModuleStatusNotInstalled, moduleType, false, err + return bean.ModuleStatusNotInstalled, moduleType, false, err } releaseValues := releaseInfo.MergedValues // if check non-cicd module status - if moduleName != ModuleNameCicd { + if moduleName != bean.ModuleNameCiCd { isEnabled := gjson.Get(releaseValues, moduleUtil.BuildModuleEnableKey(moduleName)).Bool() if isEnabled { status, err := impl.saveModuleAsInstalled(moduleName, moduleType, flagForEnablingState) @@ -243,14 +244,14 @@ func (impl ModuleServiceImpl) handleModuleNotFoundStatus(moduleName string) (Mod } else if util2.IsBaseStack() { // check if cicd is in installing state // if devtron is installed with cicd module, then cicd module should be shown as installing - installerModulesIface := gjson.Get(releaseValues, 
INSTALLER_MODULES_HELM_KEY).Value() + installerModulesIface := gjson.Get(releaseValues, bean.INSTALLER_MODULES_HELM_KEY).Value() if installerModulesIface != nil { installerModulesIfaceKind := reflect.TypeOf(installerModulesIface).Kind() if installerModulesIfaceKind == reflect.Slice { installerModules := installerModulesIface.([]interface{}) for _, installerModule := range installerModules { if installerModule == moduleName { - status, err := impl.saveModule(moduleName, ModuleStatusInstalling, moduleType, flagForEnablingState) + status, err := impl.saveModule(moduleName, bean.ModuleStatusInstalling, moduleType, flagForEnablingState) return status, moduleType, false, err } } @@ -260,11 +261,11 @@ func (impl ModuleServiceImpl) handleModuleNotFoundStatus(moduleName string) (Mod } } - return ModuleStatusNotInstalled, moduleType, false, nil + return bean.ModuleStatusNotInstalled, moduleType, false, nil } -func (impl ModuleServiceImpl) HandleModuleAction(userId int32, moduleName string, moduleActionRequest *ModuleActionRequestDto) (*ActionResponse, error) { +func (impl ModuleServiceImpl) HandleModuleAction(userId int32, moduleName string, moduleActionRequest *bean.ModuleActionRequestDto) (*bean.ActionResponse, error) { impl.logger.Debugw("handling module action request", "moduleName", moduleName, "userId", userId, "payload", moduleActionRequest) //check if can update server @@ -310,14 +311,14 @@ func (impl ModuleServiceImpl) HandleModuleAction(userId int32, moduleName string // case of data found from DB // check if module is already installed or installing currentModuleStatus := module.Status - if currentModuleStatus == ModuleStatusInstalling || currentModuleStatus == ModuleStatusInstalled { + if currentModuleStatus == bean.ModuleStatusInstalling || currentModuleStatus == bean.ModuleStatusInstalled { return nil, errors.New("module is already in installing/installed state") } } // since the request can only come for install, hence update the DB with installing status - module.Status = ModuleStatusInstalling + module.Status = bean.ModuleStatusInstalling module.Version = moduleActionRequest.Version module.UpdatedOn = time.Now() tx, err := impl.moduleRepository.GetConnection().Begin() @@ -327,7 +328,7 @@ func (impl ModuleServiceImpl) HandleModuleAction(userId int32, moduleName string } defer tx.Rollback() flagForEnablingState := false - if moduleActionRequest.ModuleType == MODULE_TYPE_SECURITY { + if moduleActionRequest.ModuleType == bean.MODULE_TYPE_SECURITY { res := strings.Split(moduleName, ".") if len(res) < 2 { impl.logger.Errorw("error in getting toolname from module name as len is less than 2", "err", err, "moduleName", moduleName) @@ -335,15 +336,15 @@ func (impl ModuleServiceImpl) HandleModuleAction(userId int32, moduleName string } toolName := strings.ToUpper(res[1]) // Finding the Module by type and status, if no module exists of current type marking current module as active and enabled by default. - err = impl.moduleRepository.FindByModuleTypeAndStatus(moduleActionRequest.ModuleType, ModuleStatusInstalled) + err = impl.moduleRepository.FindByModuleTypeAndStatus(moduleActionRequest.ModuleType, bean.ModuleStatusInstalled) if err != nil { if err == pg.ErrNoRows { var toolversion string - if moduleName == ModuleNameSecurityClair { + if moduleName == bean.ModuleNameSecurityClair { // Handled for V4 for CLAIR as we are not using CLAIR V2 anymore. 
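
Note on the scan-tool pinning in this hunk: each security module is mapped to the single scanner release the stack still supports. A minimal sketch of that selection, assuming (as the surrounding code does) that the `bean` constants are string-compatible; `defaultToolVersion` is a hypothetical helper, not part of this change:

```go
package module

import bean "github.com/devtron-labs/devtron/pkg/module/bean"

// defaultToolVersion mirrors the branch logic in HandleModuleAction:
// Clair is pinned to V4 (CLAIR V2 is no longer used) and Trivy to V1;
// other modules carry no scan-tool version to activate.
func defaultToolVersion(moduleName string) string {
	switch moduleName {
	case bean.ModuleNameSecurityClair:
		return bean.CLAIR_V4
	case bean.ModuleNameSecurityTrivy:
		return bean.TRIVY_V1
	default:
		return ""
	}
}
```
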
- toolversion = CLAIR_V4 - } else if moduleName == ModuleNameSecurityTrivy { - toolversion = TRIVY_V1 + toolversion = bean.CLAIR_V4 + } else if moduleName == bean.ModuleNameSecurityTrivy { + toolversion = bean.TRIVY_V1 } err2 := impl.scanToolMetadataService.MarkToolAsActive(toolName, toolversion, tx) if err2 != nil { @@ -383,7 +384,7 @@ func (impl ModuleServiceImpl) HandleModuleAction(userId int32, moduleName string extraValues := make(map[string]interface{}) extraValues["installer.release"] = moduleActionRequest.Version - extraValues[INSTALLER_MODULES_HELM_KEY] = []interface{}{moduleName} + extraValues[bean.INSTALLER_MODULES_HELM_KEY] = []interface{}{moduleName} alreadyInstalledModuleNames, err := impl.moduleRepository.GetInstalledModuleNames() if err != nil { impl.logger.Errorw("error in getting modules with installed status ", "err", err) @@ -410,12 +411,12 @@ func (impl ModuleServiceImpl) HandleModuleAction(userId int32, moduleName string if apiError != nil { err = apiError } - module.Status = ModuleStatusInstallFailed + module.Status = bean.ModuleStatusInstallFailed impl.moduleRepository.Update(module) return nil, err } if !updateResponse.GetSuccess() { - module.Status = ModuleStatusInstallFailed + module.Status = bean.ModuleStatusInstallFailed impl.moduleRepository.Update(module) return nil, errors.New("success is false from helm") } @@ -427,11 +428,11 @@ func (impl ModuleServiceImpl) HandleModuleAction(userId int32, moduleName string return nil, err } } - return &ActionResponse{ + return &bean.ActionResponse{ Success: true, }, nil } -func (impl ModuleServiceImpl) EnableModule(moduleName, version string) (*ActionResponse, error) { +func (impl ModuleServiceImpl) EnableModule(moduleName, version string) (*bean.ActionResponse, error) { // get module by name module, err := impl.moduleRepository.FindOne(moduleName) @@ -478,16 +479,16 @@ func (impl ModuleServiceImpl) EnableModule(moduleName, version string) (*ActionR if err != nil { return nil, err } - return &ActionResponse{ + return &bean.ActionResponse{ Success: true, }, nil } -func (impl ModuleServiceImpl) saveModuleAsInstalled(moduleName string, moduleType string, moduleEnabled bool) (ModuleStatus, error) { - return impl.saveModule(moduleName, ModuleStatusInstalled, moduleType, moduleEnabled) +func (impl ModuleServiceImpl) saveModuleAsInstalled(moduleName string, moduleType string, moduleEnabled bool) (bean.ModuleStatus, error) { + return impl.saveModule(moduleName, bean.ModuleStatusInstalled, moduleType, moduleEnabled) } -func (impl ModuleServiceImpl) saveModule(moduleName string, moduleStatus ModuleStatus, moduleType string, moduleEnabled bool) (ModuleStatus, error) { +func (impl ModuleServiceImpl) saveModule(moduleName string, moduleStatus bean.ModuleStatus, moduleType string, moduleEnabled bool) (bean.ModuleStatus, error) { module := &moduleRepo.Module{ Name: moduleName, Version: impl.serverDataStore.CurrentVersion, @@ -499,12 +500,12 @@ func (impl ModuleServiceImpl) saveModule(moduleName string, moduleStatus ModuleS err := impl.moduleRepository.Save(module) if err != nil { impl.logger.Errorw("error in saving module status ", "moduleName", moduleName, "moduleStatus", moduleStatus, "err", err) - return ModuleStatusNotInstalled, err + return bean.ModuleStatusNotInstalled, err } return moduleStatus, nil } -func (impl ModuleServiceImpl) GetAllModuleInfo() ([]ModuleInfoDto, error) { +func (impl ModuleServiceImpl) GetAllModuleInfo() ([]bean.ModuleInfoDto, error) { // fetch from DB modules, err := impl.moduleRepository.FindAll() if err 
!= nil { @@ -516,17 +517,17 @@ func (impl ModuleServiceImpl) GetAllModuleInfo() ([]ModuleInfoDto, error) { impl.logger.Errorw("error in getting modules from DB ", "err", err) return nil, err } - var installedModules []ModuleInfoDto + var installedModules []bean.ModuleInfoDto // now this is the case when data found in DB for _, module := range modules { - moduleInfoDto := ModuleInfoDto{ + moduleInfoDto := bean.ModuleInfoDto{ Name: module.Name, Status: module.Status, Moduletype: module.ModuleType, Enabled: module.Enabled, } enabled := false - if module.ModuleType != MODULE_TYPE_SECURITY && module.Status == ModuleStatusInstalled { + if module.ModuleType != bean.MODULE_TYPE_SECURITY && module.Status == bean.ModuleStatusInstalled { module.Enabled = true enabled = true err := impl.moduleRepository.Update(&module) @@ -542,9 +543,9 @@ func (impl ModuleServiceImpl) GetAllModuleInfo() ([]ModuleInfoDto, error) { return nil, err } if moduleResourcesStatusFromDb != nil { - var moduleResourcesStatus []*ModuleResourceStatusDto + var moduleResourcesStatus []*bean.ModuleResourceStatusDto for _, moduleResourceStatusFromDb := range moduleResourcesStatusFromDb { - moduleResourcesStatus = append(moduleResourcesStatus, &ModuleResourceStatusDto{ + moduleResourcesStatus = append(moduleResourcesStatus, &bean.ModuleResourceStatusDto{ Group: moduleResourceStatusFromDb.Group, Version: moduleResourceStatusFromDb.Version, Kind: moduleResourceStatusFromDb.Kind, diff --git a/pkg/module/ModuleService_test.go b/pkg/module/ModuleService_test.go index 710cd90844..4ca5436211 100644 --- a/pkg/module/ModuleService_test.go +++ b/pkg/module/ModuleService_test.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "github.com/devtron-labs/common-lib/utils" + "github.com/devtron-labs/devtron/pkg/module/bean" moduleRepo "github.com/devtron-labs/devtron/pkg/module/repo" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" @@ -44,7 +45,7 @@ func TestModuleServiceImpl_GetAllModuleInfo(t *testing.T) { tests := []struct { name string fields fields - want []ModuleInfoDto + want []bean.ModuleInfoDto wantErr bool }{ { @@ -95,8 +96,8 @@ func TestModuleServiceImpl_GetAllModuleInfo(t *testing.T) { } } -func getModuleDtoResponse1() []ModuleInfoDto { - return []ModuleInfoDto{ +func getModuleDtoResponse1() []bean.ModuleInfoDto { + return []bean.ModuleInfoDto{ { Name: "cicd", Status: "installed", @@ -120,7 +121,7 @@ func getModuleDtoResponse1() []ModuleInfoDto { { Name: "notifier", Status: "installed", - ModuleResourcesStatus: []*ModuleResourceStatusDto{ + ModuleResourcesStatus: []*bean.ModuleResourceStatusDto{ { Group: "", Version: "v1", diff --git a/pkg/module/Bean.go b/pkg/module/bean/bean.go similarity index 98% rename from pkg/module/Bean.go rename to pkg/module/bean/bean.go index fede767b45..2cd5a95ff4 100644 --- a/pkg/module/Bean.go +++ b/pkg/module/bean/bean.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package module +package bean import ( "fmt" @@ -95,7 +95,7 @@ const ( ) const ( - ModuleNameCicd ModuleName = "cicd" + ModuleNameCiCd ModuleName = "cicd" ModuleNameArgoCd ModuleName = "argo-cd" ModuleNameSecurityClair ModuleName = "security.clair" ModuleNameNotification ModuleName = "notifier" diff --git a/pkg/module/read/ModuleReadService.go b/pkg/module/read/ModuleReadService.go new file mode 100644 index 0000000000..f8a918f85c --- /dev/null +++ b/pkg/module/read/ModuleReadService.go @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2020-2024. 
Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package read + +import ( + "errors" + "github.com/devtron-labs/devtron/pkg/module/read/adapter" + "github.com/devtron-labs/devtron/pkg/module/read/bean" + moduleErr "github.com/devtron-labs/devtron/pkg/module/read/error" + moduleRepo "github.com/devtron-labs/devtron/pkg/module/repo" + "github.com/go-pg/pg" + "go.uber.org/zap" +) + +type ModuleReadService interface { + GetModuleInfoByName(moduleName string) (*bean.ModuleInfoMin, error) +} + +type ModuleReadServiceImpl struct { + logger *zap.SugaredLogger + moduleRepository moduleRepo.ModuleRepository +} + +func NewModuleReadServiceImpl( + logger *zap.SugaredLogger, + moduleRepository moduleRepo.ModuleRepository) *ModuleReadServiceImpl { + return &ModuleReadServiceImpl{ + logger: logger, + moduleRepository: moduleRepository, + } +} + +func (impl ModuleReadServiceImpl) GetModuleInfoByName(moduleName string) (*bean.ModuleInfoMin, error) { + module, err := impl.moduleRepository.FindOne(moduleName) + if err != nil && !errors.Is(err, pg.ErrNoRows) { + impl.logger.Errorw("error while fetching module info", "moduleName", moduleName, "error", err) + return nil, err + } else if errors.Is(err, pg.ErrNoRows) { + impl.logger.Debugw("module not found", "moduleName", moduleName) + return adapter.GetDefaultModuleInfo(moduleName), moduleErr.ModuleNotFoundError + } + return adapter.GetModuleInfoMin(module), nil +} diff --git a/pkg/module/read/adapter/adapter.go b/pkg/module/read/adapter/adapter.go new file mode 100644 index 0000000000..9b03cec7da --- /dev/null +++ b/pkg/module/read/adapter/adapter.go @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2020-2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package adapter + +import ( + moduleBean "github.com/devtron-labs/devtron/pkg/module/bean" + "github.com/devtron-labs/devtron/pkg/module/read/bean" + moduleRepo "github.com/devtron-labs/devtron/pkg/module/repo" +) + +func GetModuleInfoMin(module *moduleRepo.Module) *bean.ModuleInfoMin { + return &bean.ModuleInfoMin{ + Name: module.Name, + Status: module.Status, + Enabled: module.Enabled, + ModuleType: module.ModuleType, + } +} + +func GetDefaultModuleInfo(moduleName string) *bean.ModuleInfoMin { + return &bean.ModuleInfoMin{ + Name: moduleName, + Status: moduleBean.ModuleStatusNotInstalled, + } +} diff --git a/pkg/module/read/bean/bean.go b/pkg/module/read/bean/bean.go new file mode 100644 index 0000000000..a807dc6de0 --- /dev/null +++ b/pkg/module/read/bean/bean.go @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2020-2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package bean + +import ( + moduleBean "github.com/devtron-labs/devtron/pkg/module/bean" +) + +type ModuleInfoMin struct { + Name string + Status moduleBean.ModuleStatus + Enabled bool + ModuleType string +} + +func (m *ModuleInfoMin) IsInstalled() bool { + if m == nil { + return false + } + return m.Status == moduleBean.ModuleStatusInstalled +} diff --git a/pkg/module/read/error/error.go b/pkg/module/read/error/error.go new file mode 100644 index 0000000000..e6f86035cc --- /dev/null +++ b/pkg/module/read/error/error.go @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2020-2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package error + +import "errors" + +var ModuleNotFoundError = errors.New("module not found") diff --git a/pkg/module/repo/ModuleRepository.go b/pkg/module/repo/ModuleRepository.go index 99f75b5afd..bb997d0576 100644 --- a/pkg/module/repo/ModuleRepository.go +++ b/pkg/module/repo/ModuleRepository.go @@ -52,7 +52,7 @@ type ModuleRepositoryImpl struct { dbConnection *pg.DB } -func (impl ModuleRepositoryImpl) GetConnection() (dbConnection *pg.DB) { +func (impl *ModuleRepositoryImpl) GetConnection() (dbConnection *pg.DB) { return impl.dbConnection } @@ -60,29 +60,30 @@ func NewModuleRepositoryImpl(dbConnection *pg.DB) *ModuleRepositoryImpl { return &ModuleRepositoryImpl{dbConnection: dbConnection} } -func (impl ModuleRepositoryImpl) Save(module *Module) error { +func (impl *ModuleRepositoryImpl) Save(module *Module) error { return impl.dbConnection.Insert(module) } -func (impl ModuleRepositoryImpl) SaveWithTransaction(module *Module, tx *pg.Tx) error { +func (impl *ModuleRepositoryImpl) SaveWithTransaction(module *Module, tx *pg.Tx) error { return tx.Insert(module) } -func (impl ModuleRepositoryImpl) FindOne(name string) (*Module, error) { +func (impl *ModuleRepositoryImpl) FindOne(name string) (*Module, error) { module := &Module{} err := impl.dbConnection.Model(module). Where("name = ?", name).Select() return module, err } -func (impl ModuleRepositoryImpl) Update(module *Module) error { +func (impl *ModuleRepositoryImpl) Update(module *Module) error { return impl.dbConnection.Update(module) } -func (impl ModuleRepositoryImpl) UpdateWithTransaction(module *Module, tx *pg.Tx) error { +func (impl *ModuleRepositoryImpl) UpdateWithTransaction(module *Module, tx *pg.Tx) error { return tx.Update(module) } -func (impl ModuleRepositoryImpl) FindAllByStatus(status string) ([]Module, error) { + +func (impl *ModuleRepositoryImpl) FindAllByStatus(status string) ([]Module, error) { var modules []Module err := impl.dbConnection.Model(&modules). Where("status = ?", status). @@ -90,21 +91,21 @@ func (impl ModuleRepositoryImpl) FindAllByStatus(status string) ([]Module, error return modules, err } -func (impl ModuleRepositoryImpl) FindAll() ([]Module, error) { +func (impl *ModuleRepositoryImpl) FindAll() ([]Module, error) { var modules []Module err := impl.dbConnection.Model(&modules). Select() return modules, err } -func (impl ModuleRepositoryImpl) ModuleExists() (bool, error) { +func (impl *ModuleRepositoryImpl) ModuleExists() (bool, error) { module := &Module{} exists, err := impl.dbConnection.Model(module). Exists() return exists, err } -func (impl ModuleRepositoryImpl) GetInstalledModuleNames() ([]string, error) { +func (impl *ModuleRepositoryImpl) GetInstalledModuleNames() ([]string, error) { modules, err := impl.FindAllByStatus("installed") var moduleNames []string if err != nil && err != pg.ErrNoRows { @@ -117,7 +118,7 @@ func (impl ModuleRepositoryImpl) GetInstalledModuleNames() ([]string, error) { return moduleNames, nil } -func (impl ModuleRepositoryImpl) FindByModuleTypeAndStatus(moduleType string, status string) error { +func (impl *ModuleRepositoryImpl) FindByModuleTypeAndStatus(moduleType string, status string) error { module := &Module{} err := impl.dbConnection.Model(module). Where("module_type = ?", moduleType). 
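The receiver change running through ModuleRepository.go deserves a note: every method on ModuleRepositoryImpl moves from a value receiver to a pointer receiver, so calls no longer copy the struct and the method set stays uniform with the *ModuleRepositoryImpl that NewModuleRepositoryImpl returns. A minimal sketch of the difference in receiver semantics, using illustrative names that are not part of this PR:

package main

import "fmt"

type counter struct{ n int }

// value receiver: the method operates on a copy, so the increment is lost
func (c counter) incByValue() { c.n++ }

// pointer receiver: the method mutates the shared struct
func (c *counter) incByPointer() { c.n++ }

func main() {
	c := &counter{}
	c.incByValue()   // no observable effect
	c.incByPointer() // increments the shared field
	fmt.Println(c.n) // prints 1
}

For a stateless wrapper around a *pg.DB the copy was cheap, so the switch is mostly about consistency: mixing value and pointer receivers on one type is legal Go, but a uniform pointer receiver avoids surprises when the value type's method set is checked against an interface.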
@@ -126,18 +127,19 @@ func (impl ModuleRepositoryImpl) FindByModuleTypeAndStatus(moduleType string, st return err } -func (impl ModuleRepositoryImpl) MarkModuleAsEnabledWithTransaction(moduleName string, tx *pg.Tx) error { +func (impl *ModuleRepositoryImpl) MarkModuleAsEnabledWithTransaction(moduleName string, tx *pg.Tx) error { module := &Module{} _, err := tx.Model(module).Set("enabled = ?", true).Where("name = ?", moduleName).Update() return err } -func (impl ModuleRepositoryImpl) MarkModuleAsEnabled(moduleName string) error { + +func (impl *ModuleRepositoryImpl) MarkModuleAsEnabled(moduleName string) error { module := &Module{} _, err := impl.dbConnection.Model(module).Set("enabled = ?", true).Where("name = ?", moduleName).Update() return err } -func (impl ModuleRepositoryImpl) MarkOtherModulesDisabledOfSameType(moduleName, moduleType string, tx *pg.Tx) error { +func (impl *ModuleRepositoryImpl) MarkOtherModulesDisabledOfSameType(moduleName, moduleType string, tx *pg.Tx) error { module := &Module{} _, err := tx.Model(module).Set("enabled = ?", false).Where("name != ?", moduleName).Where("module_type = ?", moduleType).Update() return err diff --git a/pkg/pipeline/AppDeploymentTypeChangeManager.go b/pkg/pipeline/AppDeploymentTypeChangeManager.go index a2d4cdcc34..bcca8290be 100644 --- a/pkg/pipeline/AppDeploymentTypeChangeManager.go +++ b/pkg/pipeline/AppDeploymentTypeChangeManager.go @@ -32,8 +32,11 @@ import ( app2 "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/bean" chartService "github.com/devtron-labs/devtron/pkg/chart" + "github.com/devtron-labs/devtron/pkg/chart/read" "github.com/devtron-labs/devtron/pkg/deployment/common" + "github.com/devtron-labs/devtron/pkg/deployment/common/adapter" bean4 "github.com/devtron-labs/devtron/pkg/deployment/common/bean" + read2 "github.com/devtron-labs/devtron/pkg/deployment/common/read" commonBean "github.com/devtron-labs/devtron/pkg/deployment/gitOps/common/bean" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" bean3 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" @@ -42,8 +45,8 @@ import ( bean2 "github.com/devtron-labs/devtron/pkg/eventProcessor/out/bean" "github.com/juju/errors" "go.uber.org/zap" + errors2 "k8s.io/apimachinery/pkg/api/errors" "strconv" - "strings" ) type AppDeploymentTypeChangeManager interface { @@ -78,6 +81,8 @@ type AppDeploymentTypeChangeManagerImpl struct { workflowEventPublishService out.WorkflowEventPublishService deploymentConfigService common.DeploymentConfigService ArgoClientWrapperService argocdServer.ArgoClientWrapperService + chartReadService read.ChartReadService + DeploymentConfigReadService read2.DeploymentConfigReadService } func NewAppDeploymentTypeChangeManagerImpl( @@ -91,7 +96,9 @@ func NewAppDeploymentTypeChangeManagerImpl( gitOpsConfigReadService config.GitOpsConfigReadService, chartService chartService.ChartService, workflowEventPublishService out.WorkflowEventPublishService, - deploymentConfigService common.DeploymentConfigService) *AppDeploymentTypeChangeManagerImpl { + deploymentConfigService common.DeploymentConfigService, + chartReadService read.ChartReadService, + DeploymentConfigReadService read2.DeploymentConfigReadService) *AppDeploymentTypeChangeManagerImpl { return &AppDeploymentTypeChangeManagerImpl{ 
logger: logger, pipelineRepository: pipelineRepository, @@ -104,6 +111,8 @@ func NewAppDeploymentTypeChangeManagerImpl( chartService: chartService, workflowEventPublishService: workflowEventPublishService, deploymentConfigService: deploymentConfigService, + chartReadService: chartReadService, + DeploymentConfigReadService: DeploymentConfigReadService, } } @@ -174,6 +183,13 @@ func (impl *AppDeploymentTypeChangeManagerImpl) ChangeDeploymentType(ctx context return nil, err } deploymentConfig.DeploymentAppType = request.DesiredDeploymentType + deploymentConfig.ReleaseMode = util.PIPELINE_RELEASE_MODE_CREATE // now pipeline release mode will be create + releaseConfig, err := impl.DeploymentConfigReadService.ParseEnvLevelReleaseConfigForDevtronApp(deploymentConfig, pipeline.AppId, pipeline.EnvironmentId) + if err != nil { + impl.logger.Errorw("error in parsing release config", "err", err) + return response, err + } + deploymentConfig.ReleaseConfiguration = releaseConfig deploymentConfig, err = impl.deploymentConfigService.CreateOrUpdateConfig(nil, deploymentConfig, request.UserId) if err != nil { impl.logger.Errorw("error in updating configs", "err", err) @@ -242,6 +258,7 @@ func (impl *AppDeploymentTypeChangeManagerImpl) ChangePipelineDeploymentType(ctx EnvId: request.EnvId, DesiredDeploymentType: request.DesiredDeploymentType, TriggeredPipelines: make([]*bean.CdPipelineTrigger, 0), + FailedPipelines: make([]*bean.DeploymentChangeStatus, 0), } var deleteDeploymentType string @@ -263,12 +280,14 @@ func (impl *AppDeploymentTypeChangeManagerImpl) ChangePipelineDeploymentType(ctx return response, err } - var pipelineIds []int + var allPipelines []int for _, item := range pipelines { - pipelineIds = append(pipelineIds, item.Id) + allPipelines = append(allPipelines, item.Id) } - if len(pipelineIds) == 0 { + + pipelineIds := make([]int, 0) + if len(allPipelines) == 0 { return response, nil } @@ -279,7 +301,21 @@ func (impl *AppDeploymentTypeChangeManagerImpl) ChangePipelineDeploymentType(ctx impl.logger.Errorw("error in fetching environment deployment config by appId and envId", "appId", p.AppId, "envId", p.EnvironmentId, "err", err) return response, err } - deploymentConfigs = append(deploymentConfigs, envDeploymentConfig) + if !envDeploymentConfig.IsLinkedRelease() { + pipelineIds = append(pipelineIds, p.Id) + deploymentConfigs = append(deploymentConfigs, envDeploymentConfig) + } else { + response.FailedPipelines = append(response.FailedPipelines, + &bean.DeploymentChangeStatus{ + PipelineId: p.Id, + AppId: p.AppId, + AppName: p.App.AppName, + EnvId: p.EnvironmentId, + EnvName: p.Environment.Name, + Error: "Deployment app type cannot be changed because this app is linked with an external application", + Status: bean.Failed, + }) + } } deleteResponse := impl.DeleteDeploymentApps(ctx, pipelines, deploymentConfigs, request.UserId) @@ -314,7 +350,14 @@ func (impl *AppDeploymentTypeChangeManagerImpl) ChangePipelineDeploymentType(ctx impl.logger.Errorw("error in fetching environment deployment config by appId and envId", "appId", item.AppId, "envId", item.EnvId, "err", err) return response, err } + envDeploymentConfig.ReleaseMode = util.PIPELINE_RELEASE_MODE_CREATE // now pipeline release mode will be create envDeploymentConfig.DeploymentAppType = request.DesiredDeploymentType + releaseConfig, err := impl.DeploymentConfigReadService.ParseEnvLevelReleaseConfigForDevtronApp(envDeploymentConfig, item.AppId, item.EnvId) + if err != nil { +
impl.logger.Errorw("error in parsing release config", "err", err) + return response, err + } + envDeploymentConfig.ReleaseConfiguration = releaseConfig envDeploymentConfig, err = impl.deploymentConfigService.CreateOrUpdateConfig(nil, envDeploymentConfig, request.UserId) if err != nil { impl.logger.Errorw("error in updating deployment config", "err", err) @@ -484,13 +527,13 @@ func (impl *AppDeploymentTypeChangeManagerImpl) DeleteDeploymentApps(ctx context // delete request var err error if envDeploymentConfig.DeploymentAppType == bean3.ArgoCd { - err = impl.deleteArgoCdApp(ctx, pipeline, pipeline.DeploymentAppName, true) + err = impl.deleteArgoCdApp(ctx, pipeline, envDeploymentConfig, true) } else { // For converting from Helm to ArgoCD, GitOps should be configured - if gitOpsConfigErr != nil || !gitOpsConfigurationStatus.IsGitOpsConfigured { - err = errors.New("GitOps not configured or unable to fetch GitOps configuration") + if gitOpsConfigErr != nil || !gitOpsConfigurationStatus.IsGitOpsConfiguredAndArgoCdInstalled() { + err = errors.New("GitOps integration is not installed/configured. Please install/configure GitOps.") } else { // Register app in ACD @@ -499,7 +542,7 @@ func (impl *AppDeploymentTypeChangeManagerImpl) DeleteDeploymentApps(ctx context AcdRegisterErr, RepoURLUpdateErr, createGitRepoErr, gitOpsRepoNotFound error ) - chart, chartServiceErr := impl.chartService.FindLatestChartForAppByAppId(pipeline.AppId) + chart, chartServiceErr := impl.chartReadService.FindLatestChartForAppByAppId(pipeline.AppId) if chartServiceErr != nil { impl.logger.Errorw("Error in fetching latest chart for pipeline", "err", err, "appId", pipeline.AppId) } @@ -508,7 +551,8 @@ func (impl *AppDeploymentTypeChangeManagerImpl) DeleteDeploymentApps(ctx context if gitOpsConfigurationStatus.AllowCustomRepository || chart.IsCustomGitRepository { gitOpsRepoNotFound = fmt.Errorf(cdWorkflow.GITOPS_REPO_NOT_CONFIGURED) } else { - _, chartGitAttr, createGitRepoErr = impl.appService.CreateGitOpsRepo(&app.App{Id: pipeline.AppId, AppName: pipeline.App.AppName}, userId) + targetRevision := chart.TargetRevision + _, chartGitAttr, createGitRepoErr = impl.appService.CreateGitOpsRepo(&app.App{Id: pipeline.AppId, AppName: pipeline.App.AppName}, targetRevision, userId) if createGitRepoErr == nil { AcdRegisterErr = impl.cdPipelineConfigService.RegisterInACD(ctx, chartGitAttr, userId) if AcdRegisterErr != nil { @@ -519,8 +563,8 @@ func (impl *AppDeploymentTypeChangeManagerImpl) DeleteDeploymentApps(ctx context if RepoURLUpdateErr != nil { impl.logger.Errorw("error in updating git repo url in charts", "err", RepoURLUpdateErr) } - envDeploymentConfig.ConfigType = common.GetDeploymentConfigType(chart.IsCustomGitRepository) - envDeploymentConfig.RepoURL = chartGitAttr.RepoUrl + envDeploymentConfig.ConfigType = adapter.GetDeploymentConfigType(chart.IsCustomGitRepository) + envDeploymentConfig.SetRepoURL(chartGitAttr.RepoUrl) envDeploymentConfig, RepoURLUpdateErr = impl.deploymentConfigService.CreateOrUpdateConfig(nil, envDeploymentConfig, userId) if RepoURLUpdateErr != nil { @@ -532,10 +576,12 @@ func (impl *AppDeploymentTypeChangeManagerImpl) DeleteDeploymentApps(ctx context } else { // in this case user has already created an empty git repository and provided us gitRepoUrl chartGitAttr = &commonBean.ChartGitAttribute{ - RepoUrl: chart.GitRepoUrl, + RepoUrl: chart.GitRepoUrl, + TargetRevision: chart.TargetRevision, } } } + if gitOpsRepoNotFound != nil { impl.logger.Errorw("error no GitOps repository configured for the app", "err", 
gitOpsRepoNotFound) } @@ -601,21 +647,21 @@ func (impl *AppDeploymentTypeChangeManagerImpl) DeleteDeploymentAppsForEnvironme currentDeploymentAppType bean3.DeploymentType, exclusionList []int, includeApps []int, userId int32) (*bean.DeploymentAppTypeChangeResponse, error) { // fetch active pipelines from database for the given environment id and current deployment app type - pipelines, err := impl.pipelineRepository.FindActiveByEnvIdAndDeploymentType(environmentId, + allPipelines, err := impl.pipelineRepository.FindActiveByEnvIdAndDeploymentType(environmentId, currentDeploymentAppType, exclusionList, includeApps) + pipelines := make([]*pipelineConfig.Pipeline, 0) deploymentConfigs := make([]*bean4.DeploymentConfig, 0) - for _, p := range pipelines { + for _, p := range allPipelines { envDeploymentConfig, err := impl.deploymentConfigService.GetAndMigrateConfigIfAbsentForDevtronApps(p.AppId, p.EnvironmentId) if err != nil { impl.logger.Errorw("error in fetching environment deployment config by appId and envId", "appId", p.AppId, "envId", p.EnvironmentId, "err", err) - return &bean.DeploymentAppTypeChangeResponse{ - EnvId: environmentId, - SuccessfulPipelines: []*bean.DeploymentChangeStatus{}, - FailedPipelines: []*bean.DeploymentChangeStatus{}, - }, err + return nil, err + } + if !envDeploymentConfig.IsLinkedRelease() { + pipelines = append(pipelines, p) + deploymentConfigs = append(deploymentConfigs, envDeploymentConfig) } - deploymentConfigs = append(deploymentConfigs, envDeploymentConfig) } if err != nil { @@ -770,16 +816,19 @@ func (impl *AppDeploymentTypeChangeManagerImpl) fetchDeletedApp(ctx context.Cont // deleteArgoCdApp takes context and deployment app name used in argo cd and deletes // the application in argo cd. -func (impl *AppDeploymentTypeChangeManagerImpl) deleteArgoCdApp(ctx context.Context, pipeline *pipelineConfig.Pipeline, deploymentAppName string, +func (impl *AppDeploymentTypeChangeManagerImpl) deleteArgoCdApp(ctx context.Context, pipeline *pipelineConfig.Pipeline, envDeploymentConfig *bean4.DeploymentConfig, cascadeDelete bool) error { if !pipeline.DeploymentAppCreated { return nil } - _, err := impl.ArgoClientWrapperService.DeleteArgoApp(ctx, deploymentAppName, cascadeDelete) + var err error + applicationObjectClusterId := envDeploymentConfig.GetApplicationObjectClusterId() + applicationNamespace := envDeploymentConfig.GetApplicationObjectNamespace() + err = impl.ArgoClientWrapperService.DeleteArgoAppWithK8sClient(ctx, applicationObjectClusterId, applicationNamespace, pipeline.DeploymentAppName, cascadeDelete) if err != nil { impl.logger.Errorw("error in deleting argocd application", "err", err) // Possible that argocd app got deleted but db updation failed - if strings.Contains(err.Error(), "code = NotFound") { + if errors2.IsNotFound(err) { return nil } return err diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index 6f58d9a8fc..c5a0ec8b31 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -27,16 +27,20 @@ import ( "fmt" constants2 "github.com/devtron-labs/devtron/internal/sql/constants" attributesBean "github.com/devtron-labs/devtron/pkg/attributes/bean" + adapter2 "github.com/devtron-labs/devtron/pkg/bean/adapter" common2 "github.com/devtron-labs/devtron/pkg/bean/common" repository6 "github.com/devtron-labs/devtron/pkg/build/git/gitMaterial/repository" 
"github.com/devtron-labs/devtron/pkg/build/pipeline" bean2 "github.com/devtron-labs/devtron/pkg/build/pipeline/bean" + read2 "github.com/devtron-labs/devtron/pkg/chart/read" repository2 "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" "github.com/devtron-labs/devtron/pkg/deployment/common" + "github.com/devtron-labs/devtron/pkg/deployment/common/read" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" constants3 "github.com/devtron-labs/devtron/pkg/pipeline/constants" util4 "github.com/devtron-labs/devtron/pkg/pipeline/util" "github.com/devtron-labs/devtron/pkg/plugin" + "github.com/devtron-labs/devtron/util/sliceUtil" "golang.org/x/exp/slices" "net/http" "path" @@ -146,7 +150,9 @@ type CiCdPipelineOrchestratorImpl struct { transactionManager sql.TransactionWrapper gitOpsConfigReadService config.GitOpsConfigReadService deploymentConfigService common.DeploymentConfigService + deploymentConfigReadService read.DeploymentConfigReadService workflowCacheConfig types.WorkflowCacheConfig + chartReadService read2.ChartReadService } func NewCiCdPipelineOrchestrator( @@ -176,7 +182,9 @@ func NewCiCdPipelineOrchestrator( genericNoteService genericNotes.GenericNoteService, chartService chart.ChartService, transactionManager sql.TransactionWrapper, gitOpsConfigReadService config.GitOpsConfigReadService, - deploymentConfigService common.DeploymentConfigService) *CiCdPipelineOrchestratorImpl { + deploymentConfigService common.DeploymentConfigService, + deploymentConfigReadService read.DeploymentConfigReadService, + chartReadService read2.ChartReadService) *CiCdPipelineOrchestratorImpl { _, workflowCacheConfig, err := types.GetCiConfigWithWorkflowCacheConfig() if err != nil { logger.Errorw("Error in getting workflow cache config, continuing with default values", "err", err) @@ -211,7 +219,9 @@ func NewCiCdPipelineOrchestrator( transactionManager: transactionManager, gitOpsConfigReadService: gitOpsConfigReadService, deploymentConfigService: deploymentConfigService, + deploymentConfigReadService: deploymentConfigReadService, workflowCacheConfig: workflowCacheConfig, + chartReadService: chartReadService, } } @@ -1375,6 +1385,18 @@ func (impl CiCdPipelineOrchestratorImpl) DeleteApp(appId int, userId int32) erro impl.logger.Errorw("error in deleting auth roles", "err", err) return err } + appDeploymentConfig, err := impl.deploymentConfigService.GetAndMigrateConfigIfAbsentForDevtronApps(appId, 0) + if err != nil && !errors.Is(err, pg.ErrNoRows) { + impl.logger.Errorw("error in fetching environment deployment config by appId and envId", "appId", appId, "err", err) + return err + } else if err == nil && appDeploymentConfig != nil { + appDeploymentConfig.Active = false + appDeploymentConfig, err = impl.deploymentConfigService.CreateOrUpdateConfig(tx, appDeploymentConfig, userId) + if err != nil { + impl.logger.Errorw("error in deleting deployment config for pipeline", "appId", appId, "err", err) + return err + } + } err = tx.Commit() if err != nil { return err @@ -1726,6 +1748,23 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCDPipelines(pipelineRequest *bean DeploymentAppName: fmt.Sprintf("%s-%s", appName, env.Name), AuditLog: sql.AuditLog{UpdatedBy: userId, CreatedBy: userId, UpdatedOn: time.Now(), CreatedOn: time.Now()}, } + + if pipelineRequest.IsLinkedRelease() { + if 
len(pipelineRequest.DeploymentAppName) == 0 { + return 0, util.DefaultApiError(). + WithHttpStatusCode(http.StatusUnprocessableEntity). + WithInternalMessage("deploymentAppName is required for releaseMode: link"). + WithUserMessage("deploymentAppName is required for releaseMode: link") + } + // Here we are linking an external helm release, so deployment_app_name is set to the external release name and deployment_app_created = true, as the release already exists + // the external release name is sent from FE in pipelineRequest.DeploymentAppName field + pipeline.DeploymentAppName = pipelineRequest.DeploymentAppName + pipeline.DeploymentAppCreated = true + } + if len(pipeline.DeploymentAppName) == 0 { + pipeline.DeploymentAppName = util2.BuildDeployedAppName(appName, env.Name) + } + err = impl.pipelineRepository.Save([]*pipelineConfig.Pipeline{pipeline}, tx) if err != nil { impl.logger.Errorw("error in saving cd pipeline", "err", err, "pipeline", pipeline) @@ -1940,10 +1979,11 @@ func (impl CiCdPipelineOrchestratorImpl) GetCdPipelinesForApp(appId int) (cdPipe PreStageConfigMapSecretNames: preStageConfigmapSecrets, PostStageConfigMapSecretNames: postStageConfigmapSecrets, DeploymentAppType: envDeploymentConfig.DeploymentAppType, + ReleaseMode: envDeploymentConfig.ReleaseMode, DeploymentAppCreated: dbPipeline.DeploymentAppCreated, DeploymentAppDeleteRequest: dbPipeline.DeploymentAppDeleteRequest, IsVirtualEnvironment: dbPipeline.Environment.IsVirtualEnvironment, - IsGitOpsRepoNotConfigured: !isAppLevelGitOpsConfigured, + IsGitOpsRepoNotConfigured: !envDeploymentConfig.IsPipelineGitOpsRepoConfigured(isAppLevelGitOpsConfigured), } if pipelineStages, ok := pipelineIdAndPrePostStageMapping[dbPipeline.Id]; ok { pipeline.PreDeployStage = pipelineStages[0] @@ -1992,18 +2032,32 @@ func (impl CiCdPipelineOrchestratorImpl) GetCdPipelinesForEnv(envId int, request impl.logger.Errorw("error in fetching pipelineIdAndPrePostStageMapping", "err", err) return nil, err } - appIdAppLevelGitOpsConfiguredMap, err := impl.chartService.IsGitOpsRepoConfiguredForDevtronApps(appIds) + appIdToGitOpsConfiguredMap, err := impl.chartReadService.IsGitOpsRepoConfiguredForDevtronApps(appIds) if err != nil { impl.logger.Errorw("error in fetching latest chart details for app by appId") return nil, err } pipelines := make([]*bean.CDPipelineConfigObject, 0, len(dbPipelines)) - pipelineIdDeploymentTypeMap, err := impl.deploymentConfigService.GetDeploymentAppTypeForCDInBulk(dbPipelines) + cdPipelineMinObjs := sliceUtil.NewSliceFromFuncExec(dbPipelines, func(dbPipeline *pipelineConfig.Pipeline) *bean.CDPipelineMinConfig { + return adapter2.NewCDPipelineMinConfigFromModel(dbPipeline) + }) + pipelineIdDeploymentTypeMap, err := impl.deploymentConfigReadService.GetDeploymentAppTypeForCDInBulk(cdPipelineMinObjs, appIdToGitOpsConfiguredMap) if err != nil { impl.logger.Errorw("error, GetDeploymentAppTypeForCDInBulk", "pipelines", dbPipelines, "err", err) return nil, err } for _, dbPipeline := range dbPipelines { + var deploymentAppType, releaseMode string + var isGitOpsRepoNotConfigured bool + if envDeploymentConfigMin, ok := pipelineIdDeploymentTypeMap[dbPipeline.Id]; ok { + deploymentAppType = envDeploymentConfigMin.DeploymentAppType + releaseMode = envDeploymentConfigMin.ReleaseMode + isGitOpsRepoNotConfigured = !envDeploymentConfigMin.IsGitOpsRepoConfigured + } else { + // deployment config is required for every pipeline; fail if it is missing + impl.logger.Errorw("error in fetching deploymentAppType and release mode for pipeline", "pipelineId", dbPipeline.Id, "pipelineIdDeploymentTypeMap",
pipelineIdDeploymentTypeMap) + return nil, fmt.Errorf("error in fetching deploymentAppType and release mode for pipeline") + } pipeline := &bean.CDPipelineConfigObject{ Id: dbPipeline.Id, Name: dbPipeline.Name, @@ -2013,13 +2067,14 @@ func (impl CiCdPipelineOrchestratorImpl) GetCdPipelinesForEnv(envId int, request TriggerType: dbPipeline.TriggerType, RunPreStageInEnv: dbPipeline.RunPreStageInEnv, RunPostStageInEnv: dbPipeline.RunPostStageInEnv, - DeploymentAppType: pipelineIdDeploymentTypeMap[dbPipeline.Id], + DeploymentAppType: deploymentAppType, + ReleaseMode: releaseMode, AppName: dbPipeline.App.AppName, AppId: dbPipeline.AppId, TeamId: dbPipeline.App.TeamId, EnvironmentIdentifier: dbPipeline.Environment.EnvironmentIdentifier, IsVirtualEnvironment: dbPipeline.Environment.IsVirtualEnvironment, - IsGitOpsRepoNotConfigured: !appIdAppLevelGitOpsConfiguredMap[dbPipeline.AppId], + IsGitOpsRepoNotConfigured: isGitOpsRepoNotConfigured, } if len(dbPipeline.PreStageConfig) > 0 { preStage := bean.CdStage{} @@ -2124,6 +2179,7 @@ func (impl CiCdPipelineOrchestratorImpl) GetCdPipelinesForAppAndEnv(appId int, e impl.logger.Errorw("error in fetching latest chart details for app by appId") return nil, err } + // TODO Asutosh: why not getting deployment config ?? pipeline := &bean.CDPipelineConfigObject{ Id: dbPipeline.Id, Name: dbPipeline.Name, diff --git a/pkg/pipeline/ConfigMapService.go b/pkg/pipeline/ConfigMapService.go index c7c7b10d9b..e6d7770a70 100644 --- a/pkg/pipeline/ConfigMapService.go +++ b/pkg/pipeline/ConfigMapService.go @@ -106,7 +106,7 @@ func NewConfigMapServiceImpl(chartRepository chartRepoRepository.ChartRepository repoRepository chartRepoRepository.ChartRepoRepository, mergeUtil util.MergeUtil, pipelineConfigRepository chartConfig.PipelineConfigRepository, - configMapRepository chartConfig.ConfigMapRepository, environmentConfigRepository chartConfig.EnvConfigOverrideRepository, + configMapRepository chartConfig.ConfigMapRepository, commonService commonService.CommonService, appRepository app.AppRepository, configMapHistoryService history2.ConfigMapHistoryService, environmentRepository repository3.EnvironmentRepository, scopedVariableManager variables.ScopedVariableCMCSManager, diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index 5164408038..9ad43a77b9 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -21,12 +21,15 @@ import ( "encoding/json" errors3 "errors" "fmt" + commonBean2 "github.com/devtron-labs/common-lib/utils/k8s/commonBean" bean2 "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/api/bean/gitOps" models2 "github.com/devtron-labs/devtron/api/helm-app/models" client "github.com/devtron-labs/devtron/api/helm-app/service" helmBean "github.com/devtron-labs/devtron/api/helm-app/service/bean" "github.com/devtron-labs/devtron/client/argocdServer" + bean7 "github.com/devtron-labs/devtron/client/argocdServer/bean" + "github.com/devtron-labs/devtron/internal/constants" "github.com/devtron-labs/devtron/internal/sql/models" "github.com/devtron-labs/devtron/internal/sql/repository" app2 "github.com/devtron-labs/devtron/internal/sql/repository/app" @@ -37,23 +40,35 @@ import ( 
"github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow/cdWorkflow" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/app" + installedAppReader "github.com/devtron-labs/devtron/pkg/appStore/installedApp/read" "github.com/devtron-labs/devtron/pkg/bean" "github.com/devtron-labs/devtron/pkg/chart" + bean6 "github.com/devtron-labs/devtron/pkg/chart/bean" + read3 "github.com/devtron-labs/devtron/pkg/chart/read" chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" + bean3 "github.com/devtron-labs/devtron/pkg/cluster/bean" clutserBean "github.com/devtron-labs/devtron/pkg/cluster/environment/bean" repository6 "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" + read2 "github.com/devtron-labs/devtron/pkg/cluster/read" repository2 "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/deployment/common" bean4 "github.com/devtron-labs/devtron/pkg/deployment/common/bean" + errors4 "github.com/devtron-labs/devtron/pkg/deployment/common/errors" commonBean "github.com/devtron-labs/devtron/pkg/deployment/gitOps/common/bean" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/git" + "github.com/devtron-labs/devtron/pkg/deployment/gitOps/validation" + validationBean "github.com/devtron-labs/devtron/pkg/deployment/gitOps/validation/bean" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate" + bean5 "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/bean" + chartRefRead "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/read" + "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/read" config2 "github.com/devtron-labs/devtron/pkg/deployment/providerConfig" clientErrors "github.com/devtron-labs/devtron/pkg/errors" "github.com/devtron-labs/devtron/pkg/eventProcessor/out" "github.com/devtron-labs/devtron/pkg/imageDigestPolicy" + "github.com/devtron-labs/devtron/pkg/pipeline/adapter" pipelineConfigBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/devtron-labs/devtron/pkg/pipeline/history" repository4 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" @@ -62,14 +77,17 @@ import ( "github.com/devtron-labs/devtron/pkg/sql" "github.com/devtron-labs/devtron/pkg/variables" repository3 "github.com/devtron-labs/devtron/pkg/variables/repository" - util2 "github.com/devtron-labs/devtron/util" + globalUtil "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/rbac" "github.com/go-pg/pg" errors2 "github.com/juju/errors" "go.opentelemetry.io/otel" "go.uber.org/zap" 
+ chart2 "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" "k8s.io/apimachinery/pkg/api/errors" "net/http" + "path/filepath" "strconv" "strings" "time" @@ -82,6 +100,7 @@ type CdPipelineConfigService interface { // if any error occur , will get empty object or nil GetCdPipelineById(pipelineId int) (cdPipeline *bean.CDPipelineConfigObject, err error) CreateCdPipelines(cdPipelines *bean.CdPipelines, ctx context.Context) (*bean.CdPipelines, error) + ValidateLinkExternalArgoCDRequest(request *pipelineConfigBean.MigrateReleaseValidationRequest) pipelineConfigBean.ArgoCdAppLinkValidationResponse // PatchCdPipelines : Handle CD pipeline patch requests, making necessary changes to the configuration and returning the updated version. // Performs Create ,Update and Delete operation. PatchCdPipelines(cdPipelines *bean.CDPatchRequest, ctx context.Context) (*bean.CdPipelines, error) @@ -147,19 +166,27 @@ type CdPipelineConfigServiceImpl struct { propertiesConfigService PropertiesConfigService deploymentTemplateHistoryService deploymentTemplate.DeploymentTemplateHistoryService scopedVariableManager variables.ScopedVariableManager - deploymentConfig *util2.DeploymentServiceTypeConfig + deploymentConfig *globalUtil.DeploymentServiceTypeConfig customTagService CustomTagService ciPipelineConfigService CiPipelineConfigService buildPipelineSwitchService BuildPipelineSwitchService argoClientWrapperService argocdServer.ArgoClientWrapperService deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService gitOpsConfigReadService config.GitOpsConfigReadService + gitOpsValidationService validation.GitOpsValidationService gitOperationService git.GitOperationService chartService chart.ChartService imageDigestPolicyService imageDigestPolicy.ImageDigestPolicyService pipelineConfigEventPublishService out.PipelineConfigEventPublishService deploymentTypeOverrideService config2.DeploymentTypeOverrideService deploymentConfigService common.DeploymentConfigService + envConfigOverrideService read.EnvConfigOverrideService + chartRefReadService chartRefRead.ChartRefReadService + chartTemplateService util.ChartTemplateService + gitFactory *git.GitFactory + clusterReadService read2.ClusterReadService + installedAppReadService installedAppReader.InstalledAppReadService + chartReadService read3.ChartReadService } func NewCdPipelineConfigServiceImpl(logger *zap.SugaredLogger, pipelineRepository pipelineConfig.PipelineRepository, @@ -173,18 +200,26 @@ func NewCdPipelineConfigServiceImpl(logger *zap.SugaredLogger, pipelineRepositor chartRepository chartRepoRepository.ChartRepository, resourceGroupService resourceGroup2.ResourceGroupService, propertiesConfigService PropertiesConfigService, deploymentTemplateHistoryService deploymentTemplate.DeploymentTemplateHistoryService, - scopedVariableManager variables.ScopedVariableManager, envVariables *util2.EnvironmentVariables, + scopedVariableManager variables.ScopedVariableManager, envVariables *globalUtil.EnvironmentVariables, customTagService CustomTagService, ciPipelineConfigService CiPipelineConfigService, buildPipelineSwitchService BuildPipelineSwitchService, argoClientWrapperService argocdServer.ArgoClientWrapperService, deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService, gitOpsConfigReadService config.GitOpsConfigReadService, + gitOpsValidationService validation.GitOpsValidationService, gitOperationService git.GitOperationService, chartService chart.ChartService, imageDigestPolicyService imageDigestPolicy.ImageDigestPolicyService, 
pipelineConfigEventPublishService out.PipelineConfigEventPublishService, deploymentTypeOverrideService config2.DeploymentTypeOverrideService, - deploymentConfigService common.DeploymentConfigService) *CdPipelineConfigServiceImpl { + deploymentConfigService common.DeploymentConfigService, + envConfigOverrideService read.EnvConfigOverrideService, + chartRefReadService chartRefRead.ChartRefReadService, + chartTemplateService util.ChartTemplateService, + gitFactory *git.GitFactory, + clusterReadService read2.ClusterReadService, + installedAppReadService installedAppReader.InstalledAppReadService, + chartReadService read3.ChartReadService) *CdPipelineConfigServiceImpl { return &CdPipelineConfigServiceImpl{ logger: logger, pipelineRepository: pipelineRepository, @@ -216,11 +251,19 @@ func NewCdPipelineConfigServiceImpl(logger *zap.SugaredLogger, pipelineRepositor argoClientWrapperService: argoClientWrapperService, deployedAppMetricsService: deployedAppMetricsService, gitOpsConfigReadService: gitOpsConfigReadService, + gitOpsValidationService: gitOpsValidationService, gitOperationService: gitOperationService, imageDigestPolicyService: imageDigestPolicyService, pipelineConfigEventPublishService: pipelineConfigEventPublishService, deploymentTypeOverrideService: deploymentTypeOverrideService, deploymentConfigService: deploymentConfigService, + envConfigOverrideService: envConfigOverrideService, + chartRefReadService: chartRefReadService, + chartTemplateService: chartTemplateService, + gitFactory: gitFactory, + clusterReadService: clusterReadService, + installedAppReadService: installedAppReadService, + chartReadService: chartReadService, } } @@ -355,6 +398,7 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelineById(pipelineId int) (cdPi ParentPipelineId: appWorkflowMapping.ParentId, ParentPipelineType: appWorkflowMapping.ParentType, DeploymentAppType: envDeploymentConfig.DeploymentAppType, + ReleaseMode: envDeploymentConfig.ReleaseMode, DeploymentAppCreated: dbPipeline.DeploymentAppCreated, IsVirtualEnvironment: dbPipeline.Environment.IsVirtualEnvironment, CustomTagObject: customTag, @@ -387,12 +431,15 @@ func (impl *CdPipelineConfigServiceImpl) CreateCdPipelines(pipelineCreateRequest envIds := make([]*int, 0) for _, pipeline := range pipelineCreateRequest.Pipelines { // skip creation of pipeline if envId is not set - if pipeline.EnvironmentId <= 0 { + if pipeline.EnvironmentId <= 0 || pipeline.IsLinkedRelease() { continue } - //making environment array for fetching the clusterIds + // making environment array for fetching the clusterIds envIds = append(envIds, &pipeline.EnvironmentId) - overrideDeploymentType, err := impl.deploymentTypeOverrideService.ValidateAndOverrideDeploymentAppType(pipeline.DeploymentAppType, gitOpsConfigurationStatus.IsGitOpsConfigured, pipeline.EnvironmentId) + // validate and override deployment app type + // NOTE: using gitOpsConfigurationStatus.IsGitOpsConfigured instead of gitOpsConfigurationStatus.IsGitOpsConfiguredAndArgoCdInstalled() + // as we need to allow the user to create pipeline with linked acd app, even if argo cd is not installed + overrideDeploymentType, err := impl.deploymentTypeOverrideService.ValidateAndOverrideDeploymentAppType(pipeline.DeploymentAppType, gitOpsConfigurationStatus.IsGitOpsConfiguredAndArgoCdInstalled(), pipeline.EnvironmentId) if err != nil { impl.logger.Errorw("validation error in creating pipeline", "name", pipeline.Name, "err", err) return nil, err @@ -408,7 +455,6 @@ func (impl *CdPipelineConfigServiceImpl) 
CreateCdPipelines(pipelineCreateRequest } } - isGitOpsRequiredForCD := impl.IsGitOpsRequiredForCD(pipelineCreateRequest) app, err := impl.appRepo.FindById(pipelineCreateRequest.AppId) if err != nil { @@ -422,17 +468,32 @@ func (impl *CdPipelineConfigServiceImpl) CreateCdPipelines(pipelineCreateRequest return nil, err } - AppDeploymentConfig, err := impl.deploymentConfigService.GetAndMigrateConfigIfAbsentForDevtronApps(app.Id, 0) + for _, pipeline := range pipelineCreateRequest.Pipelines { + if pipeline.IsExternalArgoAppLinkRequest() { + migrationReq := adapter.NewMigrateReleaseValidationRequest(pipeline) + migrationReq.AppId = app.Id + linkCDValidationResponse := impl.ValidateLinkExternalArgoCDRequest(migrationReq) + if !linkCDValidationResponse.IsLinkable { + return nil, + util.NewApiError(http.StatusPreconditionFailed, + linkCDValidationResponse.ErrorDetail.ValidationFailedMessage, + string(linkCDValidationResponse.ErrorDetail.ValidationFailedReason)) + } + } + } + + appDeploymentConfig, err := impl.deploymentConfigService.GetAndMigrateConfigIfAbsentForDevtronApps(app.Id, 0) if err != nil { impl.logger.Errorw("error in fetching deployment config by appId", "appId", app.Id, "err", err) return nil, err } // TODO: creating git repo for all apps irrespective of acd or helm - if gitOpsConfigurationStatus.IsGitOpsConfigured && isGitOpsRequiredForCD && !pipelineCreateRequest.IsCloneAppReq { + if gitOpsConfigurationStatus.IsGitOpsConfiguredAndArgoCdInstalled() && + impl.IsGitOpsRequiredForCD(pipelineCreateRequest) { //TODO: ayush revisit - if gitOps.IsGitOpsRepoNotConfigured(AppDeploymentConfig.RepoURL) { - if gitOpsConfigurationStatus.AllowCustomRepository || AppDeploymentConfig.ConfigType == bean4.CUSTOM.String() { + if gitOps.IsGitOpsRepoNotConfigured(appDeploymentConfig.GetRepoURL()) { + if gitOpsConfigurationStatus.AllowCustomRepository || appDeploymentConfig.ConfigType == bean4.CUSTOM.String() { apiErr := &util.ApiError{ HttpStatusCode: http.StatusConflict, UserMessage: cdWorkflow.GITOPS_REPO_NOT_CONFIGURED, @@ -440,7 +501,8 @@ func (impl *CdPipelineConfigServiceImpl) CreateCdPipelines(pipelineCreateRequest } return nil, apiErr } - _, chartGitAttr, err := impl.appService.CreateGitOpsRepo(app, pipelineCreateRequest.UserId) + targetRevision := appDeploymentConfig.GetTargetRevision() + _, chartGitAttr, err := impl.appService.CreateGitOpsRepo(app, targetRevision, pipelineCreateRequest.UserId) if err != nil { impl.logger.Errorw("error in creating git repo", "err", err) return nil, fmt.Errorf("Create GitOps repository error: %s", err.Error()) @@ -451,7 +513,7 @@ func (impl *CdPipelineConfigServiceImpl) CreateCdPipelines(pipelineCreateRequest return nil, err } // below function will update gitRepoUrl for charts if user has not already provided gitOps repoURL - AppDeploymentConfig, err = impl.chartService.ConfigureGitOpsRepoUrlForApp(pipelineCreateRequest.AppId, chartGitAttr.RepoUrl, chartGitAttr.ChartLocation, false, pipelineCreateRequest.UserId) + appDeploymentConfig, err = impl.chartService.ConfigureGitOpsRepoUrlForApp(pipelineCreateRequest.AppId, chartGitAttr.RepoUrl, chartGitAttr.ChartLocation, false, pipelineCreateRequest.UserId) if err != nil { impl.logger.Errorw("error in updating git repo url in charts", "err", err) return nil, err @@ -460,27 +522,50 @@ func (impl *CdPipelineConfigServiceImpl) CreateCdPipelines(pipelineCreateRequest } for _, pipeline := range pipelineCreateRequest.Pipelines { - // skip creation of DeploymentConfig if envId is not set + var envDeploymentConfig 
*bean4.DeploymentConfig if pipeline.EnvironmentId > 0 { - envDeploymentConfig := &bean4.DeploymentConfig{ + env, err := impl.environmentRepository.FindById(pipeline.EnvironmentId) + if err != nil { + impl.logger.Errorw("error in fetching environment", "environmentId", pipeline.EnvironmentId, "err", err) + return nil, err + } + envDeploymentConfig = &bean4.DeploymentConfig{ AppId: app.Id, EnvironmentId: pipeline.EnvironmentId, - ConfigType: AppDeploymentConfig.ConfigType, DeploymentAppType: pipeline.DeploymentAppType, - RepoURL: AppDeploymentConfig.RepoURL, - RepoName: AppDeploymentConfig.RepoName, - ReleaseMode: pipeline.ReleaseMode, + RepoURL: appDeploymentConfig.RepoURL, + ReleaseMode: pipeline.GetReleaseMode(), Active: true, } - envDeploymentConfig, err := impl.deploymentConfigService.CreateOrUpdateConfig(nil, envDeploymentConfig, pipelineCreateRequest.UserId) + var releaseConfig *bean4.ReleaseConfiguration + if pipeline.IsExternalArgoAppLinkRequest() { + releaseConfig, err = impl.parseReleaseConfigForExternalAcdApp(pipeline.ApplicationObjectClusterId, pipeline.ApplicationObjectNamespace, pipeline.DeploymentAppName) + if err != nil { + impl.logger.Errorw("error in parsing deployment config for external acd app", "appId", pipeline.AppId, "envId", pipeline.EnvironmentId, "err", err) + return nil, err + } + envDeploymentConfig.ConfigType = bean4.CUSTOM.String() + } else if pipeline.DeploymentAppType == util.PIPELINE_DEPLOYMENT_TYPE_ACD { + releaseConfig, err = impl.parseReleaseConfigForACDApp(app, appDeploymentConfig, env) + if err != nil { + impl.logger.Errorw("error in parsing deployment config for acd app", "appId", pipeline.AppId, "envId", pipeline.EnvironmentId, "err", err) + return nil, err + } + envDeploymentConfig.ConfigType = appDeploymentConfig.ConfigType + } + envDeploymentConfig.ReleaseConfiguration = releaseConfig + if releaseConfig != nil && releaseConfig.ArgoCDSpec.Spec.Source != nil { + envDeploymentConfig = envDeploymentConfig.SetRepoURL(releaseConfig.ArgoCDSpec.Spec.Source.RepoURL) // for backward compatibility + } + envDeploymentConfig, err = impl.deploymentConfigService.CreateOrUpdateConfig(nil, envDeploymentConfig, pipelineCreateRequest.UserId) if err != nil { impl.logger.Errorw("error in fetching creating env config", "appId", app.Id, "envId", pipeline.EnvironmentId, "err", err) return nil, err } } - id, err := impl.createCdPipeline(ctx, app, pipeline, pipelineCreateRequest.UserId) + id, err := impl.createCdPipeline(ctx, app, pipeline, envDeploymentConfig, pipelineCreateRequest.UserId) if err != nil { impl.logger.Errorw("error in creating pipeline", "name", pipeline.Name, "err", err) return nil, err @@ -508,6 +593,254 @@ func (impl *CdPipelineConfigServiceImpl) CreateCdPipelines(pipelineCreateRequest return pipelineCreateRequest, nil } +func (impl *CdPipelineConfigServiceImpl) parseReleaseConfigForACDApp(app *app2.App, AppDeploymentConfig *bean4.DeploymentConfig, env *repository6.Environment) (*bean4.ReleaseConfiguration, error) { + + envOverride, err := impl.envConfigOverrideService.FindLatestChartForAppByAppIdAndEnvId(app.Id, env.Id) + if err != nil && !errors2.IsNotFound(err) { + impl.logger.Errorw("error in fetching latest env override", "appId", app.Id, "envId", env.Id, "err", err) + return nil, err + } + var latestChart *chartRepoRepository.Chart + if !envOverride.IsOverridden() { + latestChart, err = impl.chartRepository.FindLatestChartForAppByAppId(app.Id) + if err != nil { + return nil, err + } + } else { + // if the chart is overridden at env level, it may have a different version than the app-level chart.
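+ // e.g. the app-level chart may be on 4.18.0 while this env pins 4.17.1 (versions here are only illustrative);
+ // chartRefId and chartLocation below are derived from whichever chart is selected at this point,
+ // so the generated Argo CD source path follows the env-level version whenever an override exists.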
+ latestChart = envOverride.Chart + } + chartRefId := latestChart.ChartRefId + + chartRef, err := impl.chartRefReadService.FindById(chartRefId) + if err != nil { + impl.logger.Errorw("error in fetching chart", "chartRefId", chartRefId, "err", err) + return nil, err + } + chartLocation := filepath.Join(chartRef.Location, latestChart.ChartVersion) + + return &bean4.ReleaseConfiguration{ + Version: bean4.Version, + ArgoCDSpec: bean4.ArgoCDSpec{ + Metadata: bean4.ApplicationMetadata{ + ClusterId: bean3.DefaultClusterId, + Namespace: argocdServer.DevtronInstalationNs, + }, + Spec: bean4.ApplicationSpec{ + Destination: &bean4.Destination{ + Namespace: env.Namespace, + Server: env.Cluster.ServerUrl, + }, + Source: &bean4.ApplicationSource{ + RepoURL: AppDeploymentConfig.GetRepoURL(), + Path: chartLocation, + TargetRevision: globalUtil.GetDefaultTargetRevision(), + Helm: &bean4.ApplicationSourceHelm{ + ValueFiles: []string{fmt.Sprintf("_%d-values.yaml", env.Id)}, + }, + }, + }, + }, + }, nil +} + +func (impl *CdPipelineConfigServiceImpl) ValidateLinkExternalArgoCDRequest(request *pipelineConfigBean.MigrateReleaseValidationRequest) pipelineConfigBean.ArgoCdAppLinkValidationResponse { + appId := request.AppId + applicationObjectClusterId := request.ApplicationMetadataRequest.ApplicationObjectClusterId + applicationObjectNamespace := request.ApplicationMetadataRequest.ApplicationObjectNamespace + acdAppName := request.DeploymentAppName + + response := pipelineConfigBean.ArgoCdAppLinkValidationResponse{ + IsLinkable: false, + ApplicationMetadata: pipelineConfigBean.NewEmptyApplicationMetadata(), + } + + argoApplicationSpec, err := impl.argoClientWrapperService.GetArgoAppByNameWithK8sClient(context.Background(), applicationObjectClusterId, applicationObjectNamespace, acdAppName) + if err != nil { + impl.logger.Errorw("error in fetching application", "deploymentAppName", acdAppName, "err", err) + return response.SetUnknownErrorDetail(err) + } + if argoApplicationSpec.Spec.HasMultipleSources() { + return response.SetErrorDetail(pipelineConfigBean.UnsupportedApplicationSpec, "application with multiple sources not supported") + } + if argoApplicationSpec.Spec.Source != nil && argoApplicationSpec.Spec.Source.Helm != nil && len(argoApplicationSpec.Spec.Source.Helm.ValueFiles) != 1 { + return response.SetErrorDetail(pipelineConfigBean.UnsupportedApplicationSpec, "application with multiple or empty helm value files is not supported") + } + if argoApplicationSpec.Spec.Source != nil && strings.ToLower(argoApplicationSpec.Spec.Source.TargetRevision) == bean7.TargetRevisionHead { + return response.SetErrorDetail(pipelineConfigBean.UnsupportedApplicationSpec, "target revision HEAD is not supported") + } + + targetClusterURL := argoApplicationSpec.Spec.Destination.Server + if len(targetClusterURL) == 0 { + return response.SetErrorDetail(pipelineConfigBean.UnsupportedApplicationSpec, "application with empty destination server is not supported") + } + targetClusterNamespace := argoApplicationSpec.Spec.Destination.Namespace + if len(targetClusterNamespace) == 0 { + return response.SetErrorDetail(pipelineConfigBean.UnsupportedApplicationSpec, "application with empty destination namespace is not supported") + } + if argoApplicationSpec.Spec.Source != nil { + response.ApplicationMetadata.Source.RepoURL = argoApplicationSpec.Spec.Source.RepoURL + response.ApplicationMetadata.Source.ChartPath = argoApplicationSpec.Spec.Source.Chart + } + response.ApplicationMetadata.Status = string(argoApplicationSpec.Status.Health.Status) + + pipelines, err :=
impl.pipelineRepository.GetArgoPipelineByArgoAppName(acdAppName) + if err != nil && !errors3.Is(err, pg.ErrNoRows) { + return response.SetUnknownErrorDetail(err) + } + + pipeline, err := impl.deploymentConfigService.FilterPipelinesByApplicationClusterIdAndNamespace(pipelines, applicationObjectClusterId, applicationObjectNamespace) + if err != nil && !errors3.Is(err, errors4.PipelineNotFoundError) { + impl.logger.Errorw("error in filtering pipelines by application clusterId and namespace", "applicationObjectClusterId", applicationObjectClusterId, "applicationObjectNamespace", applicationObjectNamespace, "err", err) + return response.SetUnknownErrorDetail(err) + } else if pipeline.Id != 0 { + return response.SetErrorDetail(pipelineConfigBean.ApplicationAlreadyPresent, pipelineConfigBean.PipelineAlreadyPresentMsg) + } + + installedApp, err := impl.installedAppReadService.GetInstalledAppByGitOpsAppName(acdAppName) + if err != nil && !errors3.Is(err, pg.ErrNoRows) { + return response.SetUnknownErrorDetail(err) + } + if installedApp != nil { + // installed app found + if bean3.DefaultClusterId == applicationObjectClusterId && argocdServer.DevtronInstalationNs == applicationObjectNamespace { + return response.SetErrorDetail(pipelineConfigBean.ApplicationAlreadyPresent, pipelineConfigBean.HelmAppAlreadyPresentMsg) + } + } + + response.ApplicationMetadata.Destination.Namespace = targetClusterNamespace + var targetCluster *bean3.ClusterBean + if targetClusterURL == commonBean2.DefaultClusterUrl { + targetCluster, err = impl.clusterReadService.FindById(request.ApplicationMetadataRequest.ApplicationObjectClusterId) + if targetCluster != nil { + response.ApplicationMetadata.Destination.ClusterServerUrl = targetCluster.ServerUrl + } + } else { + response.ApplicationMetadata.Destination.ClusterServerUrl = targetClusterURL + targetCluster, err = impl.clusterReadService.FindByClusterURL(targetClusterURL) + } + if err != nil && !errors3.Is(err, pg.ErrNoRows) { + impl.logger.Errorw("error in getting targetCluster by url", "clusterURL", targetClusterURL, "err", err) + return response.SetUnknownErrorDetail(err) + } else if errors3.Is(err, pg.ErrNoRows) { + impl.logger.Debugw("targetCluster not found by url", "clusterURL", targetClusterURL) + return response.SetErrorDetail(pipelineConfigBean.ClusterNotFound, "targetCluster not added in global configuration") + } + + response.ApplicationMetadata.Destination.ClusterName = targetCluster.ClusterName + + targetEnv, err := impl.environmentRepository.FindOneByNamespaceAndClusterId(targetClusterNamespace, targetCluster.Id) + if err != nil { + if errors3.Is(err, pg.ErrNoRows) { + return response.SetErrorDetail(pipelineConfigBean.EnvironmentNotFound, "environment not added in global configuration") + } + return response.SetUnknownErrorDetail(err) + } + response.ApplicationMetadata.Destination.EnvironmentName = targetEnv.Name + response.ApplicationMetadata.Destination.EnvironmentId = targetEnv.Id + + var requestedGitUrl string + if argoApplicationSpec.Spec.Source != nil { + requestedGitUrl = argoApplicationSpec.Spec.Source.RepoURL + } + validateRequest := &validationBean.ValidateGitOpsRepoUrlRequest{ + RequestedGitUrl: requestedGitUrl, + UseActiveGitOps: true, // oss only supports active gitops + } + sanitisedRepoUrl, err := impl.gitOpsValidationService.ValidateGitOpsRepoUrl(validateRequest) + if err != nil { + if apiError, ok := err.(*util.ApiError); ok && apiError.Code == constants.InvalidGitOpsRepoUrlForPipeline { + return 
+	var chartPath, targetRevision string
+	if argoApplicationSpec.Spec.Source != nil {
+		chartPath = argoApplicationSpec.Spec.Source.Path
+		targetRevision = argoApplicationSpec.Spec.Source.TargetRevision
+	}
+	helmChart, err := impl.extractHelmChartForExternalArgoApp(sanitisedRepoUrl, targetRevision, chartPath)
+	if err != nil {
+		impl.logger.Errorw("error in extracting helm chart from application spec", "acdAppName", acdAppName, "err", err)
+		return response.SetUnknownErrorDetail(err)
+	}
+
+	applicationChartName, applicationChartVersion := helmChart.Metadata.Name, helmChart.Metadata.Version
+	latestChart, err := impl.chartReadService.FindLatestChartForAppByAppId(appId)
+	if err != nil {
+		impl.logger.Errorw("error in finding latest chart by appId", "appId", appId, "err", err)
+		return response.SetUnknownErrorDetail(err)
+	}
+	chartRef, err := impl.chartRefReadService.FindById(latestChart.ChartRefId)
+	if err != nil {
+		impl.logger.Errorw("error in finding chart ref by chartRefId", "chartRefId", latestChart.ChartRefId, "err", err)
+		return response.SetUnknownErrorDetail(err)
+	}
+	var valuesFilename string
+	if argoApplicationSpec.Spec.Source != nil && argoApplicationSpec.Spec.Source.Helm != nil {
+		valuesFilename = argoApplicationSpec.Spec.Source.Helm.ValueFiles[0]
+	}
+	response.ApplicationMetadata.Source.ChartMetadata = pipelineConfigBean.ChartMetadata{
+		RequiredChartVersion: applicationChartVersion,
+		SavedChartName:       chartRef.Name,
+		ValuesFilename:       valuesFilename,
+		RequiredChartName:    applicationChartName,
+	}
+
+	if chartRef.Name != applicationChartName {
+		return response.SetErrorDetail(pipelineConfigBean.ChartTypeMismatch, fmt.Sprintf(pipelineConfigBean.ChartTypeMismatchErrorMsg, applicationChartName, chartRef.Name))
+	}
+
+	_, err = impl.chartRefReadService.FindByVersionAndName(applicationChartVersion, chartRef.Name)
+	if err != nil && !errors3.Is(err, pg.ErrNoRows) {
+		impl.logger.Errorw("error in finding chart ref by chart name and version", "chartName", applicationChartName, "chartVersion", applicationChartVersion, "err", err)
+		return response.SetUnknownErrorDetail(err)
+	} else if errors3.Is(err, pg.ErrNoRows) {
+		return response.SetErrorDetail(pipelineConfigBean.ChartVersionNotFound, fmt.Sprintf(pipelineConfigBean.ChartVersionNotFoundErrorMsg, applicationChartVersion, chartRef.Name))
+	}
+	response.IsLinkable = true
+	response.ApplicationMetadata.Status = string(argoApplicationSpec.Status.Health.Status)
+
+	overrideDeploymentType, err := impl.deploymentTypeOverrideService.ValidateAndOverrideDeploymentAppType(util.PIPELINE_DEPLOYMENT_TYPE_ACD, true, targetEnv.Id)
+	if err != nil {
+		impl.logger.Errorw("validation error for the used deployment type", "targetEnvId", targetEnv.Id, "deploymentAppType", request.DeploymentAppType, "err", err)
+		if apiError, ok := err.(*util.ApiError); ok && apiError.Code == constants.InvalidDeploymentAppTypeForPipeline {
+			return response.SetErrorDetail(pipelineConfigBean.EnforcedPolicyViolation, apiError.InternalMessage)
+		}
+		return response.SetUnknownErrorDetail(err)
+	}
+	if overrideDeploymentType != util.PIPELINE_DEPLOYMENT_TYPE_ACD {
+		errMsg := fmt.Sprintf("Cannot migrate Argo CD Application. Deployment via %q is enforced on the target environment.", overrideDeploymentType)
+		return response.SetErrorDetail(pipelineConfigBean.EnforcedPolicyViolation, errMsg)
+	}
+	return response
+}
+
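+// parseReleaseConfigForExternalAcdApp builds a ReleaseConfiguration from the live
+// spec of an externally managed Argo CD application object.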
+func (impl *CdPipelineConfigServiceImpl) parseReleaseConfigForExternalAcdApp(clusterId int, namespace, acdAppName string) (*bean4.ReleaseConfiguration, error) {
+	application, err := impl.argoClientWrapperService.GetArgoAppByNameWithK8sClient(context.Background(), clusterId, namespace, acdAppName)
+	if err != nil {
+		impl.logger.Errorw("error in fetching application", "deploymentAppName", acdAppName, "err", err)
+		return nil, err
+	}
+	applicationJSON, err := json.Marshal(application)
+	if err != nil {
+		impl.logger.Errorw("error in marshalling application", "applicationName", acdAppName, "err", err)
+		return nil, err
+	}
+	var argoApplicationSpec bean4.ArgoCDSpec
+	err = json.Unmarshal(applicationJSON, &argoApplicationSpec)
+	if err != nil {
+		impl.logger.Errorw("error in unmarshalling application", "applicationName", acdAppName, "err", err)
+		return nil, err
+	}
+	argoApplicationSpec.SetApplicationObjectClusterId(clusterId)
+
+	return &bean4.ReleaseConfiguration{
+		Version:    bean4.Version,
+		ArgoCDSpec: argoApplicationSpec,
+	}, nil
+}
+
 func (impl *CdPipelineConfigServiceImpl) CDPipelineCustomTagDBOperations(pipeline *bean.CDPipelineConfigObject) error {

 	if pipeline.EnableCustomTag && (pipeline.CustomTagObject != nil && len(pipeline.CustomTagObject.TagPattern) == 0) {
@@ -873,14 +1206,18 @@ func (impl *CdPipelineConfigServiceImpl) DeleteCdPipeline(pipeline *pipelineConf
 	}
 	impl.logger.Debugw("acd app is already deleted for this pipeline", "pipeline", pipeline)
 	if deleteFromAcd {
-		if _, err := impl.argoClientWrapperService.DeleteArgoApp(ctx, deploymentAppName, cascadeDelete); err != nil {
+		//TODO: ayush test
+		applicationObjectClusterId := envDeploymentConfig.GetApplicationObjectClusterId()
+		applicationNamespace := envDeploymentConfig.GetApplicationObjectNamespace()
+
+		if err := impl.argoClientWrapperService.DeleteArgoAppWithK8sClient(ctx, applicationObjectClusterId, applicationNamespace, deploymentAppName, cascadeDelete); err != nil {
 			impl.logger.Errorw("err in deleting pipeline on argocd", "id", pipeline, "err", err)

 			if forceDelete {
 				impl.logger.Warnw("error while deletion of app in acd, continue to delete in db as this operation is force delete", "error", err)
 			} else {
 				//statusError, _ := err.(*errors2.StatusError)
-				if cascadeDelete && strings.Contains(err.Error(), "code = NotFound") {
+				if cascadeDelete && errors.IsNotFound(err) {
 					err = &util.ApiError{
 						UserMessage:     "Could not delete as application not found in argocd",
 						InternalMessage: err.Error(),
@@ -951,14 +1288,16 @@ func (impl *CdPipelineConfigServiceImpl) DeleteACDAppCdPipelineWithNonCascade(pi
 		impl.logger.Errorw("error in fetching environment deployment config by appId and envId", "appId", pipeline.AppId, "envId", pipeline.EnvironmentId, "err", err)
 		return err
 	}
+	applicationObjectClusterId := envDeploymentConfig.GetApplicationObjectClusterId()
+	applicationObjectNamespace := envDeploymentConfig.GetApplicationObjectNamespace()
 	//delete app from argo cd with non-cascade, if created
 	if pipeline.DeploymentAppCreated && util.IsAcdApp(envDeploymentConfig.DeploymentAppType) {
 		deploymentAppName := pipeline.DeploymentAppName
 		impl.logger.Debugw("acd app is already deleted for this pipeline", "pipeline", pipeline)
-		if _, err = impl.argoClientWrapperService.DeleteArgoApp(ctx, deploymentAppName, false); err != nil {
+		if err = impl.argoClientWrapperService.DeleteArgoAppWithK8sClient(ctx, applicationObjectClusterId, applicationObjectNamespace, deploymentAppName, false); err != nil {
 			impl.logger.Errorw("err in deleting pipeline on argocd", "id", pipeline, "err", err)
 			//statusError, _ := err.(*errors2.StatusError)
-			if !strings.Contains(err.Error(), "code = NotFound") {
+			if !errors.IsNotFound(err) {
 				err = &util.ApiError{
 					UserMessage:     "Could not delete application",
 					InternalMessage: err.Error(),
@@ -1128,6 +1467,7 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelinesForApp(appId int) (cdPipe
 			RunPreStageInEnv:              dbPipeline.RunPreStageInEnv,
 			RunPostStageInEnv:             dbPipeline.RunPostStageInEnv,
 			DeploymentAppType:             dbPipeline.DeploymentAppType,
+			ReleaseMode:                   dbPipeline.GetReleaseMode(),
 			DeploymentAppCreated:          dbPipeline.DeploymentAppCreated,
 			ParentPipelineType:            appToWorkflowMapping.ParentType,
 			ParentPipelineId:              appToWorkflowMapping.ParentId,
@@ -1229,15 +1569,6 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelinesByEnvironment(request res
 	for _, item := range appWorkflowMappings {
 		pipelineWorkflowMapping[item.ComponentId] = item
 	}
-	isAppLevelGitOpsConfigured := false
-	gitOpsConfigStatus, err := impl.gitOpsConfigReadService.IsGitOpsConfigured()
-	if err != nil {
-		impl.logger.Errorw("error in fetching global GitOps configuration")
-		return nil, err
-	}
-	if gitOpsConfigStatus.IsGitOpsConfigured && !gitOpsConfigStatus.AllowCustomRepository {
-		isAppLevelGitOpsConfigured = true
-	}
 	var strPipelineIds []string
 	for _, pipelineId := range pipelineIds {
 		strPipelineIds = append(strPipelineIds, strconv.Itoa(pipelineId))
@@ -1263,13 +1594,6 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelinesByEnvironment(request res
 			}
 			customTagStage = repository5.PIPELINE_STAGE_TYPE_POST_CD
 		}
-		if !isAppLevelGitOpsConfigured {
-			isAppLevelGitOpsConfigured, err = impl.chartService.IsGitOpsRepoConfiguredForDevtronApp(dbPipeline.AppId)
-			if err != nil {
-				impl.logger.Errorw("error in fetching latest chart details for app by appId")
-				return nil, err
-			}
-		}

 		pipeline := &bean.CDPipelineConfigObject{
 			Id:                            dbPipeline.Id,
@@ -1286,6 +1610,7 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelinesByEnvironment(request res
 			RunPreStageInEnv:              dbPipeline.RunPreStageInEnv,
 			RunPostStageInEnv:             dbPipeline.RunPostStageInEnv,
 			DeploymentAppType:             dbPipeline.DeploymentAppType,
+			ReleaseMode:                   dbPipeline.GetReleaseMode(),
 			ParentPipelineType:            pipelineWorkflowMapping[dbPipeline.Id].ParentType,
 			ParentPipelineId:              pipelineWorkflowMapping[dbPipeline.Id].ParentId,
 			AppName:                       dbPipeline.AppName,
@@ -1295,7 +1620,7 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelinesByEnvironment(request res
 			PostDeployStage:               dbPipeline.PostDeployStage,
 			CustomTagObject:               customTag,
 			CustomTagStage:                &customTagStage,
-			IsGitOpsRepoNotConfigured:     !isAppLevelGitOpsConfigured,
+			IsGitOpsRepoNotConfigured:     dbPipeline.IsGitOpsRepoNotConfigured,
 		}
 		pipelines = append(pipelines, pipeline)
 	}
@@ -1354,6 +1679,7 @@ func (impl *CdPipelineConfigServiceImpl) GetCdPipelinesByEnvironmentMin(request
 			EnvironmentId:        dbPipeline.EnvironmentId,
 			Id:                   dbPipeline.Id,
 			DeploymentAppType:    envDeploymentConfig.DeploymentAppType,
+			ReleaseMode:          envDeploymentConfig.ReleaseMode,
 			IsVirtualEnvironment: dbPipeline.Environment.IsVirtualEnvironment,
 		}
 		cdPipelines = append(cdPipelines, pcObject)
@@ -1475,12 +1801,16 @@ func (impl *CdPipelineConfigServiceImpl) GetBulkActionImpactedPipelines(dto *bea
 }

 func (impl *CdPipelineConfigServiceImpl) IsGitOpsRequiredForCD(pipelineCreateRequest *bean.CdPipelines) bool {
-
+	if pipelineCreateRequest.IsCloneAppReq {
+		// if it is an app clone request then gitops is not required
+		return false
+	}
 	// if deploymentAppType is not coming in request than hasAtLeastOneGitOps will be false
-
 	haveAtLeastOneGitOps := false
 	for _, pipeline := range pipelineCreateRequest.Pipelines {
-		if pipeline.EnvironmentId > 0 && pipeline.DeploymentAppType == util.PIPELINE_DEPLOYMENT_TYPE_ACD {
+		if pipeline.EnvironmentId > 0 &&
+			pipeline.DeploymentAppType == util.PIPELINE_DEPLOYMENT_TYPE_ACD &&
+			!pipeline.IsExternalArgoAppLinkRequest() {
 			haveAtLeastOneGitOps = true
 		}
 	}
@@ -1659,7 +1989,7 @@ func (impl *CdPipelineConfigServiceImpl) validateCDPipelineRequest(pipelineCreat
 }

 func (impl *CdPipelineConfigServiceImpl) RegisterInACD(ctx context.Context, chartGitAttr *commonBean.ChartGitAttribute, userId int32) error {
-	err := impl.argoClientWrapperService.RegisterGitOpsRepoInArgoWithRetry(ctx, chartGitAttr.RepoUrl, userId)
+	err := impl.argoClientWrapperService.RegisterGitOpsRepoInArgoWithRetry(ctx, chartGitAttr.RepoUrl, chartGitAttr.TargetRevision, userId)
 	if err != nil {
 		impl.logger.Errorw("error while register git repo in argo", "err", err)
 		return err
@@ -1667,7 +1997,7 @@ func (impl *CdPipelineConfigServiceImpl) RegisterInACD(ctx context.Context, char
 	return nil
 }

-func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, app *app2.App, pipeline *bean.CDPipelineConfigObject, userId int32) (pipelineRes int, err error) {
+func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, app *app2.App, pipeline *bean.CDPipelineConfigObject, deploymentConfig *bean4.DeploymentConfig, userId int32) (pipelineRes int, err error) {
 	dbConnection := impl.pipelineRepository.GetConnection()
 	tx, err := dbConnection.Begin()
 	if err != nil {
@@ -1678,7 +2008,7 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a
 	if (pipeline.AppWorkflowId == 0 || pipeline.IsSwitchCiPipelineRequest()) && pipeline.ParentPipelineType == "WEBHOOK" {
 		if pipeline.AppWorkflowId == 0 {
 			wf := &appWorkflow.AppWorkflow{
-				Name:     fmt.Sprintf("wf-%d-%s", app.Id, util2.Generate(4)),
+				Name:     fmt.Sprintf("wf-%d-%s", app.Id, globalUtil.Generate(4)),
 				AppId:    app.Id,
 				Active:   true,
 				AuditLog: sql.AuditLog{CreatedBy: userId, CreatedOn: time.Now(), UpdatedOn: time.Now(), UpdatedBy: userId},
@@ -1708,7 +2038,7 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a
 	// do not create the pipeline if environment is not set
 	pipelineId := 0
 	if pipeline.EnvironmentId > 0 {
-		chart, err := impl.chartRepository.FindLatestChartForAppByAppId(app.Id)
+		latestChart, err := impl.chartRepository.FindLatestChartForAppByAppId(app.Id)
 		if err != nil {
 			return 0, err
 		}
@@ -1721,24 +2051,38 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a
 		}
 		appLevelAppMetricsEnabled = isAppLevelMetricsEnabled

-		overrideCreateRequest := &pipelineConfigBean.EnvironmentOverrideCreateInternalDTO{
-			Chart:               chart,
-			EnvironmentId:       pipeline.EnvironmentId,
-			UserId:              userId,
-			ManualReviewed:      false,
-			ChartStatus:         models.CHARTSTATUS_NEW,
-			IsOverride:          false,
-			IsAppMetricsEnabled: appLevelAppMetricsEnabled,
-			IsBasicViewLocked:   false,
-			Namespace:           pipeline.Namespace,
-			CurrentViewEditor:   chart.CurrentViewEditor,
-			MergeStrategy:       "",
+		var (
+			envOverride       *bean5.EnvConfigOverride
+			updatedAppMetrics bool
+		)
+		if pipeline.IsExternalArgoAppLinkRequest() {
+			overrideCreateRequest, err := impl.parseEnvOverrideCreateRequestForExternalAcdApp(deploymentConfig, latestChart, app, userId, pipeline, appLevelAppMetricsEnabled)
+			if err != nil {
+				impl.logger.Errorw("error in parsing env override create request for external acd app", "appId", app.Id, "envId", pipeline.EnvironmentId, "err", err)
+				return 0, err
+			}
+			envOverride, updatedAppMetrics, err = impl.propertiesConfigService.CreateIfRequired(overrideCreateRequest, tx)
+			if err != nil {
+				impl.logger.Errorw("error in creating env override", "appId", app.Id, "envId", pipeline.EnvironmentId, "err", err)
+				return 0, err
+			}
+		} else {
+			overrideCreateRequest := &pipelineConfigBean.EnvironmentOverrideCreateInternalDTO{
+				Chart:               latestChart,
+				EnvironmentId:       pipeline.EnvironmentId,
+				UserId:              userId,
+				ManualReviewed:      false,
+				ChartStatus:         models.CHARTSTATUS_NEW,
+				IsOverride:          false,
+				IsAppMetricsEnabled: appLevelAppMetricsEnabled,
+				IsBasicViewLocked:   false,
+				Namespace:           pipeline.Namespace,
+				CurrentViewEditor:   latestChart.CurrentViewEditor,
+				MergeStrategy:       "",
+			}
+			envOverride, updatedAppMetrics, err = impl.propertiesConfigService.CreateIfRequired(overrideCreateRequest, tx)
+			if err != nil {
+				return 0, err
+			}
+			appLevelAppMetricsEnabled = updatedAppMetrics
 		}
-		envOverride, updatedAppMetrics, err := impl.propertiesConfigService.CreateIfRequired(overrideCreateRequest, tx)
-		if err != nil {
-			return 0, err
-		}
 		appLevelAppMetricsEnabled = updatedAppMetrics
 		// Get pipeline override based on Deployment strategy
 		//TODO: mark as created in our db
@@ -1863,6 +2207,101 @@ func (impl *CdPipelineConfigServiceImpl) createCdPipeline(ctx context.Context, a
 	return pipelineId, nil
 }

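+// parseEnvOverrideCreateRequestForExternalAcdApp prepares the environment override
+// create request for a pipeline that links an externally managed Argo CD application,
+// creating the base chart entry for the app first if it does not exist yet.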
not found after creation", "appId", app.Id, "chartRefId", chartRef.Id) + return nil, fmt.Errorf("base deployment chart not found") + } + } + chartForOverride.GlobalOverride = string(values) + overrideCreateRequest := &pipelineConfigBean.EnvironmentOverrideCreateInternalDTO{ + Chart: chartForOverride, + EnvironmentId: pipeline.EnvironmentId, + UserId: userId, + ManualReviewed: false, + ChartStatus: models.CHARTSTATUS_NEW, + IsOverride: true, + IsAppMetricsEnabled: appLevelAppMetricsEnabled, + IsBasicViewLocked: false, + Namespace: pipeline.Namespace, + CurrentViewEditor: latestChart.CurrentViewEditor, + MergeStrategy: models.MERGE_STRATEGY_REPLACE, + } + return overrideCreateRequest, err +} + +func (impl *CdPipelineConfigServiceImpl) GetValuesAndChartMetadataForExternalArgoCDApp(spec bean4.ArgoCDSpec) (json.RawMessage, *chart2.Metadata, error) { + repoURL := spec.Spec.Source.RepoURL + chartPath := spec.Spec.Source.Path + targetRevision := spec.Spec.Source.TargetRevision + //validation is performed before this step, so assuming ValueFiles array has one and only one entry + valuesFileName := spec.Spec.Source.Helm.ValueFiles[0] + helmChart, err := impl.extractHelmChartForExternalArgoApp(repoURL, targetRevision, chartPath) + if err != nil { + impl.logger.Errorw("error in extracting helm ") + return nil, nil, err + } + for _, file := range helmChart.Files { + if file.Name == valuesFileName { + return file.Data, helmChart.Metadata, nil + } + } + return nil, nil, errors2.New(fmt.Sprintf("values file with name %s not found in chart", valuesFileName)) +} + +func (impl *CdPipelineConfigServiceImpl) extractHelmChartForExternalArgoApp(repoURL, targetRevision, chartPath string) (*chart2.Chart, error) { + repoName := impl.gitOpsConfigReadService.GetGitOpsRepoNameFromUrl(repoURL) + chartDir := fmt.Sprintf("%s-%s", repoName, impl.chartTemplateService.GetDir()) + clonedDir, err := impl.gitOperationService.GetClonedDir(context.Background(), chartDir, repoURL, targetRevision) + defer impl.chartTemplateService.CleanDir(clonedDir) + if err != nil { + impl.logger.Errorw("error in cloning in dir for external argo app", "repoURL", repoURL, "err", err) + return nil, err + } + chartFullPath := filepath.Join(clonedDir, chartPath) + helmChart, err := loader.Load(chartFullPath) + if err != nil { + impl.logger.Errorw("error in loading helm chart", "repoURL", repoURL, "chartPath", chartFullPath, "err", err) + return nil, err + } + return helmChart, nil +} + func (impl *CdPipelineConfigServiceImpl) updateCdPipeline(ctx context.Context, pipeline *bean.CDPipelineConfigObject, userID int32) (err error) { if len(pipeline.PreStage.Config) > 0 && !strings.Contains(pipeline.PreStage.Config, "beforeStages") { @@ -2059,14 +2498,16 @@ func (impl *CdPipelineConfigServiceImpl) DeleteCdPipelinePartial(pipeline *pipel } } impl.logger.Debugw("acd app is already deleted for this pipeline", "pipeline", pipeline) - if _, err := impl.argoClientWrapperService.DeleteArgoApp(ctx, deploymentAppName, cascadeDelete); err != nil { + applicationObjectClusterId := envDeploymentConfig.GetApplicationObjectClusterId() + applicationNamespace := envDeploymentConfig.GetApplicationObjectNamespace() + if err = impl.argoClientWrapperService.DeleteArgoAppWithK8sClient(ctx, applicationObjectClusterId, applicationNamespace, deploymentAppName, cascadeDelete); err != nil { impl.logger.Errorw("err in deleting pipeline on argocd", "id", pipeline, "err", err) if forceDelete { impl.logger.Warnw("error while deletion of app in acd, continue to delete in db as this 
operation is force delete", "error", err) } else { //statusError, _ := err.(*errors2.StatusError) - if cascadeDelete && strings.Contains(err.Error(), "code = NotFound") { + if cascadeDelete && errors.IsNotFound(err) { err = &util.ApiError{ UserMessage: "Could not delete as application not found in argocd", InternalMessage: err.Error(), diff --git a/pkg/pipeline/PipelineStageServiceIT_test.go b/pkg/pipeline/PipelineStageServiceIT_test.go index f14402d689..9f583cbf48 100644 --- a/pkg/pipeline/PipelineStageServiceIT_test.go +++ b/pkg/pipeline/PipelineStageServiceIT_test.go @@ -32,6 +32,7 @@ import ( "github.com/devtron-labs/devtron/pkg/variables/models" "github.com/devtron-labs/devtron/pkg/variables/parsers" repository4 "github.com/devtron-labs/devtron/pkg/variables/repository" + "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" "github.com/stretchr/testify/assert" "go.uber.org/zap" @@ -669,7 +670,7 @@ func setupSuite(t *testing.T) func(t *testing.T) { RunPostStageInEnv: false, DeploymentAppCreated: false, DeploymentAppType: "helm", - DeploymentAppName: fmt.Sprintf("%s-%s", string(randomAppName), string(randomEnvName)), + DeploymentAppName: util.BuildDeployedAppName(string(randomAppName), string(randomEnvName)), AuditLog: sql.AuditLog{UpdatedBy: userId, CreatedBy: userId, UpdatedOn: time.Now(), CreatedOn: time.Now()}, } err = pipelineRepoImpl.Save([]*pipelineConfig.Pipeline{pipeline}, tx) diff --git a/pkg/pipeline/PropertiesConfig.go b/pkg/pipeline/PropertiesConfig.go index 7deb3bef28..4d37024830 100644 --- a/pkg/pipeline/PropertiesConfig.go +++ b/pkg/pipeline/PropertiesConfig.go @@ -19,8 +19,11 @@ package pipeline import ( "context" "encoding/json" + errors2 "errors" "fmt" + "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" + "github.com/devtron-labs/devtron/pkg/deployment/common" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics" bean2 "github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics/bean" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate" @@ -30,6 +33,7 @@ import ( "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/devtron-labs/devtron/pkg/variables" repository5 "github.com/devtron-labs/devtron/pkg/variables/repository" + "net/http" "time" chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" @@ -52,7 +56,7 @@ type PropertiesConfigService interface { GetAppIdByChartEnvId(chartEnvId int) (*bean4.EnvConfigOverride, error) GetLatestEnvironmentProperties(appId, environmentId int) (*bean.EnvironmentProperties, error) - ResetEnvironmentProperties(id int) (bool, error) + ResetEnvironmentProperties(id int, userId int32) (bool, error) CreateEnvironmentPropertiesWithNamespace(appId int, propertiesRequest *bean.EnvironmentProperties) (*bean.EnvironmentProperties, error) FetchEnvProperties(appId, envId, chartRefId int) (*bean4.EnvConfigOverride, error) @@ -66,6 +70,7 @@ type PropertiesConfigServiceImpl struct { scopedVariableManager variables.ScopedVariableManager deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService envConfigOverrideReadService read.EnvConfigOverrideService + deploymentConfigService 
 }

 func NewPropertiesConfigServiceImpl(logger *zap.SugaredLogger,
@@ -75,7 +80,8 @@ func NewPropertiesConfigServiceImpl(logger *zap.SugaredLogger,
 	deploymentTemplateHistoryService deploymentTemplate.DeploymentTemplateHistoryService,
 	scopedVariableManager variables.ScopedVariableManager,
 	deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService,
-	envConfigOverrideReadService read.EnvConfigOverrideService) *PropertiesConfigServiceImpl {
+	envConfigOverrideReadService read.EnvConfigOverrideService,
+	deploymentConfigService common.DeploymentConfigService) *PropertiesConfigServiceImpl {
 	return &PropertiesConfigServiceImpl{
 		logger:                       logger,
 		envConfigRepo:                envConfigRepo,
@@ -85,6 +91,7 @@ func NewPropertiesConfigServiceImpl(logger *zap.SugaredLogger,
 		scopedVariableManager:        scopedVariableManager,
 		deployedAppMetricsService:    deployedAppMetricsService,
 		envConfigOverrideReadService: envConfigOverrideReadService,
+		deploymentConfigService:      deploymentConfigService,
 	}
 }

@@ -191,6 +198,15 @@ func (impl PropertiesConfigServiceImpl) GetEnvironmentProperties(appId, environm
 		return nil, err
 	}
 	environmentPropertiesResponse.AppMetrics = &isAppMetricsEnabled
+
+	externalReleaseType, err := impl.deploymentConfigService.GetExternalReleaseType(appId, environmentId)
+	if err != nil {
+		impl.logger.Errorw("error in getting deployment config by appId and envId", "appId", appId, "envId", environmentId, "err", err)
+		return nil, err
+	}
+	if len(externalReleaseType) != 0 {
+		environmentPropertiesResponse.EnvironmentConfig.MigratedFrom = &externalReleaseType
+	}
 	return environmentPropertiesResponse, nil
 }

@@ -200,13 +216,24 @@ func (impl PropertiesConfigServiceImpl) FetchEnvProperties(appId, envId, chartRe

 func (impl PropertiesConfigServiceImpl) CreateEnvironmentProperties(appId int, environmentProperties *bean.EnvironmentProperties) (*bean.EnvironmentProperties, error) {
 	chart, err := impl.chartRepo.FindChartByAppIdAndRefId(appId, environmentProperties.ChartRefId)
-	if err != nil && pg.ErrNoRows != err {
+	if err != nil && !errors2.Is(err, pg.ErrNoRows) {
 		return nil, err
-	}
-	if pg.ErrNoRows == err {
+	} else if errors2.Is(err, pg.ErrNoRows) {
 		impl.logger.Errorw("create new chart set latest=false", "a", "b")
 		return nil, fmt.Errorf("NOCHARTEXIST")
 	}
+
+	externalReleaseType, err := impl.deploymentConfigService.GetExternalReleaseType(chart.AppId, environmentProperties.EnvironmentId)
+	if err != nil {
+		impl.logger.Errorw("error in getting deployment config by appId and envId", "appId", chart.AppId, "envId", environmentProperties.EnvironmentId, "err", err)
+		return nil, err
+	}
+	if externalReleaseType.IsArgoApplication() {
+		return nil, util.NewApiError(http.StatusConflict,
+			"chart version change is not allowed for external argo application",
+			"chart version change is not allowed for external argo application")
+	}
+
 	chart.GlobalOverride = string(environmentProperties.EnvOverrideValues)
 	appMetrics := false
 	if environmentProperties.AppMetrics != nil {
@@ -326,6 +353,17 @@ func (impl PropertiesConfigServiceImpl) UpdateEnvironmentProperties(appId int, p
 		return nil, err
 	}

+	chart, err := impl.chartRepo.FindById(overrideDbObj.ChartId)
+	if err != nil {
+		impl.logger.Errorw("error in chartRepo.FindById", "chartId", overrideDbObj.ChartId, "err", err)
+		return nil, err
+	}
+	err = impl.deploymentConfigService.UpdateChartLocationInDeploymentConfig(appId, overrideDbObj.TargetEnvironment, chart.ChartRefId, userId, chart.ChartVersion)
+	if err != nil {
in UpdateChartLocationInDeploymentConfig", "appId", appId, "envId", overrideDbObj.TargetEnvironment, "err", err) + return nil, err + } + isAppMetricsEnabled := false if propertiesRequest.AppMetrics != nil { isAppMetricsEnabled = *propertiesRequest.AppMetrics @@ -460,7 +498,15 @@ func (impl PropertiesConfigServiceImpl) CreateIfRequired(request *bean.Environme impl.logger.Errorw("error in creating envconfig", "data", envOverride, "error", err) return nil, isAppMetricsEnabled, err } + envOverrideDBObj.Chart = chart envOverride = adapter.EnvOverrideDBToDTO(envOverrideDBObj) + + err = impl.deploymentConfigService.UpdateChartLocationInDeploymentConfig(chart.AppId, envOverride.TargetEnvironment, chart.ChartRefId, userId, envOverride.Chart.ChartVersion) + if err != nil { + impl.logger.Errorw("error in UpdateChartLocationInDeploymentConfig", "appId", chart.AppId, "envId", envOverride.TargetEnvironment, "err", err) + return nil, isAppMetricsEnabled, err + } + err = impl.deploymentTemplateHistoryService.CreateDeploymentTemplateHistoryFromEnvOverrideTemplate(envOverride, tx, isAppMetricsEnabled, 0) if err != nil { impl.logger.Errorw("error in creating entry for env deployment template history", "err", err, "envOverride", envOverride) @@ -550,7 +596,7 @@ func (impl PropertiesConfigServiceImpl) GetLatestEnvironmentProperties(appId, en return environmentProperties, nil } -func (impl PropertiesConfigServiceImpl) ResetEnvironmentProperties(id int) (bool, error) { +func (impl PropertiesConfigServiceImpl) ResetEnvironmentProperties(id int, userId int32) (bool, error) { envOverride, err := impl.envConfigOverrideReadService.GetByIdIncludingInactive(id) if err != nil { return false, err @@ -570,6 +616,18 @@ func (impl PropertiesConfigServiceImpl) ResetEnvironmentProperties(id int) (bool impl.logger.Errorw("error, DeleteEnvLevelMetricsIfPresent", "err", err, "appId", envOverride.Chart.AppId, "envId", envOverride.TargetEnvironment) return false, err } + + chart, err := impl.chartRepo.FindLatestChartForAppByAppId(envOverride.Chart.AppId) + if err != nil { + impl.logger.Errorw("error in chartRefRepository.FindById", "chartRefId", envOverride.Chart.ChartRefId, "err", err) + return false, err + } + err = impl.deploymentConfigService.UpdateChartLocationInDeploymentConfig(envOverride.Chart.AppId, envOverride.TargetEnvironment, chart.ChartRefId, userId, chart.ChartVersion) + if err != nil { + impl.logger.Errorw("error in UpdateChartLocationInDeploymentConfig", "appId", envOverride.Chart.AppId, "envId", envOverride.TargetEnvironment, "err", err) + return false, err + } + //VARIABLES err = impl.scopedVariableManager.RemoveMappedVariables(envOverride.Id, repository5.EntityTypeDeploymentTemplateEnvLevel, envOverride.UpdatedBy, nil) if err != nil { diff --git a/pkg/pipeline/adapter/adapter.go b/pkg/pipeline/adapter/adapter.go index 427d97d1b4..09be8ad8f1 100644 --- a/pkg/pipeline/adapter/adapter.go +++ b/pkg/pipeline/adapter/adapter.go @@ -23,6 +23,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/ciPipeline" "github.com/devtron-labs/devtron/pkg/bean" bean2 "github.com/devtron-labs/devtron/pkg/build/pipeline/bean" + bean3 "github.com/devtron-labs/devtron/pkg/cluster/environment/bean" pipelineConfigBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/devtron-labs/devtron/pkg/pipeline/repository" "github.com/devtron-labs/devtron/pkg/pipeline/types" @@ 
+func NewMigrateReleaseValidationRequest(pipeline *bean.CDPipelineConfigObject) *pipelineConfigBean.MigrateReleaseValidationRequest {
+	request := &pipelineConfigBean.MigrateReleaseValidationRequest{
+		AppId:             pipeline.AppId,
+		DeploymentAppName: pipeline.DeploymentAppName,
+		DeploymentAppType: pipeline.DeploymentAppType,
+	}
+	if pipeline.DeploymentAppType == bean3.PIPELINE_DEPLOYMENT_TYPE_ACD {
+		request.ApplicationMetadataRequest = pipelineConfigBean.ApplicationMetadataRequest{
+			ApplicationObjectClusterId: pipeline.ApplicationObjectClusterId,
+			ApplicationObjectNamespace: pipeline.ApplicationObjectNamespace,
+		}
+	}
+	return request
+}
diff --git a/pkg/pipeline/bean/EnvironmentProperties.go b/pkg/pipeline/bean/EnvironmentProperties.go
index 5ecfc0f578..f56f360ec4 100644
--- a/pkg/pipeline/bean/EnvironmentProperties.go
+++ b/pkg/pipeline/bean/EnvironmentProperties.go
@@ -20,6 +20,7 @@ import (
 	"encoding/json"
 	"github.com/devtron-labs/devtron/internal/sql/models"
 	chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository"
+	"github.com/devtron-labs/devtron/pkg/deployment/common/bean"
 )

 type EnvironmentProperties struct {
@@ -41,6 +42,7 @@ type EnvironmentProperties struct {
 	Description   string                    `json:"description" validate:"max=40"`
 	ClusterId     int                       `json:"clusterId"`
 	MergeStrategy models.MergeStrategy      `json:"mergeStrategy"`
+	MigratedFrom  *bean.ExternalReleaseType `json:"migratedFrom,omitempty"`
 }

 type EnvironmentOverrideCreateInternalDTO struct {
diff --git a/pkg/pipeline/bean/ExternalArgoAppLink.go b/pkg/pipeline/bean/ExternalArgoAppLink.go
new file mode 100644
index 0000000000..cc5b7c5ded
--- /dev/null
+++ b/pkg/pipeline/bean/ExternalArgoAppLink.go
@@ -0,0 +1,93 @@
+package bean
+
+type MigrateReleaseValidationRequest struct {
+	AppId                      int                        `json:"appId"`
+	DeploymentAppName          string                     `json:"deploymentAppName"`
+	DeploymentAppType          string                     `json:"deploymentAppType"`
+	ApplicationMetadataRequest ApplicationMetadataRequest `json:"applicationMetadata"`
+}
+
+type ApplicationMetadataRequest struct {
+	ApplicationObjectClusterId int    `json:"applicationObjectClusterId"`
+	ApplicationObjectNamespace string `json:"applicationObjectNamespace"`
+}
+
+type ArgoCdAppLinkValidationResponse struct {
+	IsLinkable          bool                `json:"isLinkable"`
+	ErrorDetail         ErrorDetail         `json:"errorDetail"`
+	ApplicationMetadata ApplicationMetadata `json:"applicationMetadata"`
+}
+
+type ApplicationMetadata struct {
+	Source      Source      `json:"source"`
+	Destination Destination `json:"destination"`
+	Status      string      `json:"status"`
+}
+
+func NewEmptyApplicationMetadata() ApplicationMetadata {
+	return ApplicationMetadata{}
+}
+
+type Source struct {
+	RepoURL       string        `json:"repoURL"`
+	ChartPath     string        `json:"chartPath"`
+	ChartMetadata ChartMetadata `json:"chartMetadata"`
+}
+
+type ChartMetadata struct {
+	RequiredChartVersion string `json:"requiredChartVersion"`
+	SavedChartName       string `json:"savedChartName"`
+	ValuesFilename       string `json:"valuesFilename"`
+	RequiredChartName    string `json:"requiredChartName"`
+}
+
+type Destination struct {
+	ClusterName      string `json:"clusterName"`
+	ClusterServerUrl string `json:"clusterServerUrl"`
+	Namespace        string `json:"namespace"`
+	EnvironmentName  string `json:"environmentName"`
+	EnvironmentId    int    `json:"environmentId"`
+}
+
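+// SetErrorDetail records the failure reason and message on the response and
+// returns the updated response by value.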
+func (a *ArgoCdAppLinkValidationResponse) SetErrorDetail(ValidationFailedReason LinkFailedReason, ValidationFailedMessage string) ArgoCdAppLinkValidationResponse {
+	a.ErrorDetail = ErrorDetail{
+		ValidationFailedReason:  ValidationFailedReason,
+		ValidationFailedMessage: ValidationFailedMessage,
+	}
+	return *a
+}
+
+func (a *ArgoCdAppLinkValidationResponse) SetUnknownErrorDetail(err error) ArgoCdAppLinkValidationResponse {
+	a.ErrorDetail = ErrorDetail{
+		ValidationFailedReason:  InternalServerError,
+		ValidationFailedMessage: err.Error(),
+	}
+	return *a
+}
+
+type LinkFailedReason string
+
+type ErrorDetail struct {
+	ValidationFailedReason  LinkFailedReason `json:"validationFailedReason"`
+	ValidationFailedMessage string           `json:"validationFailedMessage"`
+}
+
+const (
+	ClusterNotFound            LinkFailedReason = "ClusterNotFound"
+	EnvironmentNotFound        LinkFailedReason = "EnvironmentNotFound"
+	ApplicationAlreadyPresent  LinkFailedReason = "ApplicationAlreadyPresent"
+	UnsupportedApplicationSpec LinkFailedReason = "UnsupportedApplicationSpec"
+	ChartTypeMismatch          LinkFailedReason = "ChartTypeMismatch"
+	ChartVersionNotFound       LinkFailedReason = "ChartVersionNotFound"
+	GitOpsNotFound             LinkFailedReason = "GitOpsNotFound"
+	InternalServerError        LinkFailedReason = "InternalServerError"
+	EnvironmentAlreadyPresent  LinkFailedReason = "EnvironmentAlreadyPresent"
+	EnforcedPolicyViolation    LinkFailedReason = "EnforcedPolicyViolation"
+)
+
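+// User-facing messages surfaced in ErrorDetail when link validation fails.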
+const (
+	ChartTypeMismatchErrorMsg    string = "Argo CD application uses '%s' chart, whereas this application uses '%s' chart. You can upload your own charts in Global Configuration > Deployment Charts."
+	ChartVersionNotFoundErrorMsg string = "Chart version %s not found for %s chart"
+	PipelineAlreadyPresentMsg    string = "A pipeline already exists for this environment."
+	HelmAppAlreadyPresentMsg     string = "A helm app already exists for this environment."
+)
diff --git a/pkg/workflow/dag/WorkflowDagExecutor.go b/pkg/workflow/dag/WorkflowDagExecutor.go
index 9426ff56f7..d55ad5d8b8 100644
--- a/pkg/workflow/dag/WorkflowDagExecutor.go
+++ b/pkg/workflow/dag/WorkflowDagExecutor.go
@@ -408,7 +408,7 @@ func (impl *WorkflowDagExecutorImpl) ProcessDevtronAsyncInstallRequest(cdAsyncIn
 		impl.logger.Errorw("error in getting deployment config by appId and envId", "appId", overrideRequest.AppId, "envId", overrideRequest.EnvId, "err", err)
 		return err
 	}
-	releaseId, _, releaseErr := impl.cdTriggerService.TriggerRelease(overrideRequest, envDeploymentConfig, newCtx, cdAsyncInstallReq.TriggeredAt, cdAsyncInstallReq.TriggeredBy)
+	releaseId, _, releaseErr := impl.cdTriggerService.TriggerRelease(newCtx, overrideRequest, envDeploymentConfig, cdAsyncInstallReq.TriggeredAt, cdAsyncInstallReq.TriggeredBy)
 	if releaseErr != nil {
 		impl.logger.Errorw("error encountered in ProcessDevtronAsyncInstallRequest", "err", releaseErr, "cdWfrId", cdWfr.Id)
 		impl.handleAsyncTriggerReleaseError(newCtx, releaseErr, cdWfr, overrideRequest)
diff --git a/pkg/workflow/status/WorkflowStatusService.go b/pkg/workflow/status/WorkflowStatusService.go
index 7ca800d796..5c109fed8f 100644
--- a/pkg/workflow/status/WorkflowStatusService.go
+++ b/pkg/workflow/status/WorkflowStatusService.go
@@ -225,16 +225,25 @@ func (impl *WorkflowStatusServiceImpl) UpdatePipelineTimelineAndStatusByLiveAppl
 		return nil, isTimelineUpdated
 	}

-	if impl.acdConfig.IsManualSyncEnabled() {
+	// this should only be called when we have git-ops configured
+	// try fetching status from argo cd
+	dc, err := impl.deploymentConfigService.GetConfigForDevtronApps(pipeline.AppId, pipeline.EnvironmentId)
+	if err != nil {
+		impl.logger.Errorw("error, GetConfigForDevtronApps", "appId", pipeline.AppId, "environmentId", pipeline.EnvironmentId, "err", err)
+		return nil, isTimelineUpdated
+	}
+
+	if impl.acdConfig.IsManualSyncEnabled() && dc.IsArgoAppSyncAndRefreshSupported() {
 		// if manual sync check for application sync status
 		isArgoAppSynced := impl.pipelineStatusTimelineService.GetArgoAppSyncStatus(cdWfr.Id)
 		if !isArgoAppSynced {
 			return nil, isTimelineUpdated
 		}
 	}
-	// this should only be called when we have git-ops configured
-	// try fetching status from argo cd
-	app, err := impl.argocdClientWrapperService.GetArgoAppByName(context.Background(), pipeline.DeploymentAppName)
+
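+	// resolve the cluster and namespace where the Argo application object lives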
+	applicationObjectClusterId := dc.GetApplicationObjectClusterId()
+	applicationObjectNamespace := dc.GetApplicationObjectNamespace()
+	app, err := impl.argocdClientWrapperService.GetArgoAppByNameWithK8sClient(context.Background(), applicationObjectClusterId, applicationObjectNamespace, pipeline.DeploymentAppName)
 	if err != nil {
 		impl.logger.Errorw("error in getting acd application", "err", err, "argoAppName", pipeline)
 		// updating cdWfr status
@@ -269,7 +278,7 @@ func (impl *WorkflowStatusServiceImpl) UpdatePipelineTimelineAndStatusByLiveAppl
 		impl.logger.Errorw("found empty argo application object", "appName", pipeline.DeploymentAppName)
 		return fmt.Errorf("found empty argo application object"), isTimelineUpdated
 	}
-	isSucceeded, isTimelineUpdated, pipelineOverride, err = impl.appService.UpdateDeploymentStatusForGitOpsPipelines(app, time.Now(), isAppStore)
+	isSucceeded, isTimelineUpdated, pipelineOverride, err = impl.appService.UpdateDeploymentStatusForGitOpsPipelines(app, applicationObjectClusterId, time.Now(), isAppStore)
 	if err != nil {
 		impl.logger.Errorw("error in updating deployment status for gitOps cd pipelines", "app", app, "err", err)
 		return err, isTimelineUpdated
@@ -302,6 +311,13 @@ func (impl *WorkflowStatusServiceImpl) UpdatePipelineTimelineAndStatusByLiveAppl
 		impl.logger.Errorw("error in getting latest installedAppVersionHistory by installedAppId", "err", err, "installedAppId", installedApp.Id)
 		return nil, isTimelineUpdated
 	}
+	dc, err := impl.deploymentConfigService.GetConfigForHelmApps(installedApp.AppId, installedApp.EnvironmentId)
+	if err != nil {
+		impl.logger.Errorw("error, GetConfigForHelmApps", "appId", installedApp.AppId, "environmentId", installedApp.EnvironmentId, "err", err)
+		return nil, isTimelineUpdated
+	}
+	applicationObjectClusterId := dc.GetApplicationObjectClusterId()
+
 	impl.logger.Debugw("ARGO_PIPELINE_STATUS_UPDATE_REQ", "stage", "checkingDeploymentStatus", "installedApp", installedApp, "installedAppVersionHistory", installedAppVersionHistory)
 	if util3.IsTerminalRunnerStatus(installedAppVersionHistory.Status) {
 		// drop event
@@ -364,7 +380,7 @@ func (impl *WorkflowStatusServiceImpl) UpdatePipelineTimelineAndStatusByLiveAppl
 		impl.logger.Errorw("found empty argo application object", "appName", acdAppName)
 		return fmt.Errorf("found empty argo application object"), isTimelineUpdated
 	}
-	isSucceeded, isTimelineUpdated, pipelineOverride, err = impl.appService.UpdateDeploymentStatusForGitOpsPipelines(app, time.Now(), isAppStore)
+	isSucceeded, isTimelineUpdated, pipelineOverride, err = impl.appService.UpdateDeploymentStatusForGitOpsPipelines(app, applicationObjectClusterId, time.Now(), isAppStore)
 	if err != nil {
 		impl.logger.Errorw("error in updating deployment status for gitOps cd pipelines", "app", app)
 		return err, isTimelineUpdated
@@ -536,7 +552,16 @@ func (impl *WorkflowStatusServiceImpl) syncACDHelmApps(deployedBeforeMinutes int
 	argoAppName := util3.BuildDeployedAppName(appDetails.AppName, envDetails.Name)
 	ctx := context.Background()
 	syncTime := time.Now()
-	syncErr := impl.argocdClientWrapperService.SyncArgoCDApplicationIfNeededAndRefresh(ctx, argoAppName)
+	deploymentConfig, err := impl.deploymentConfigService.GetConfigForHelmApps(appDetails.Id, envDetails.Id)
+	if err != nil {
+		impl.logger.Errorw("error in getting deployment config db object by appId and envId", "appId", appDetails.Id, "envId", envDetails.Id, "err", err)
+		return err
+	}
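+	// skip the explicit sync when this deployment config does not support Argo app sync and refresh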
err: %s", syncErr.Error()), 1) diff --git a/scripts/sql/32203100_release_config.down.sql b/scripts/sql/32203100_release_config.down.sql new file mode 100644 index 0000000000..273e9f56ae --- /dev/null +++ b/scripts/sql/32203100_release_config.down.sql @@ -0,0 +1,7 @@ +BEGIN; + +-- Drop the column release_config from deployment_config +ALTER TABLE deployment_config + DROP COLUMN IF EXISTS release_config; + +END; \ No newline at end of file diff --git a/scripts/sql/32203100_release_config.up.sql b/scripts/sql/32203100_release_config.up.sql new file mode 100644 index 0000000000..a6392edcbc --- /dev/null +++ b/scripts/sql/32203100_release_config.up.sql @@ -0,0 +1,15 @@ +BEGIN; + +-- Add release_config column to deployment_config +ALTER TABLE deployment_config + ADD COLUMN IF NOT EXISTS release_config jsonb; + +-- Set active to false for all deployment_configs that have an app_id that is inactive +UPDATE deployment_config + SET active = false + WHERE active = true + AND app_id IN ( + SELECT id FROM app WHERE active = false + ); + +END; \ No newline at end of file diff --git a/tests/pipeline/ChartService_test.go b/tests/pipeline/ChartService_test.go index 8791ecc8ad..06aca529ef 100644 --- a/tests/pipeline/ChartService_test.go +++ b/tests/pipeline/ChartService_test.go @@ -24,7 +24,6 @@ import ( "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/bulkAction/bean" "github.com/devtron-labs/devtron/pkg/bulkAction/service" - "github.com/devtron-labs/devtron/pkg/chart" "github.com/devtron-labs/devtron/pkg/sql" jsonpatch "github.com/evanphx/json-patch" "io" @@ -43,8 +42,8 @@ func setup() { logger, _ := util.NewSugardLogger() dbConnection, _ := sql.NewDbConnection(config, logger) bulkUpdateRepository := bulkUpdate.NewBulkUpdateRepository(dbConnection, logger) - bulkUpdateService = service.NewBulkUpdateServiceImpl(bulkUpdateRepository, nil, nil, nil, nil, "", - chart.DefaultChart(""), util.MergeUtil{}, nil, nil, nil, nil, nil, + bulkUpdateService = service.NewBulkUpdateServiceImpl(bulkUpdateRepository, nil, nil, nil, nil, nil, + nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) } diff --git a/util/DeploymentUtil.go b/util/DeploymentUtil.go index 6ac612a299..e01ee2b4ee 100644 --- a/util/DeploymentUtil.go +++ b/util/DeploymentUtil.go @@ -40,10 +40,12 @@ import ( "encoding/binary" "fmt" "github.com/davecgh/go-spew/spew" + argoBean "github.com/devtron-labs/devtron/client/argocdServer/bean" "hash" "hash/fnv" v1 "k8s.io/api/core/v1" "math/rand" + "strings" "sync" "time" ) @@ -191,3 +193,12 @@ func ComputeHash(template *v1.PodTemplateSpec, collisionCount *int32) string { func BuildDeployedAppName(appName string, environmentName string) string { return fmt.Sprintf("%s-%s", appName, environmentName) } + +func IsDefaultTargetRevision(branch string) bool { + branch = strings.TrimSpace(branch) + return branch == argoBean.TargetRevisionMaster || branch == argoBean.TargetRevisionOriginMaster +} + +func GetDefaultTargetRevision() string { + return argoBean.TargetRevisionMaster +} diff --git a/util/GlobalConfig.go b/util/GlobalConfig.go index a30ee0c884..30c4eb40fd 100644 --- a/util/GlobalConfig.go +++ b/util/GlobalConfig.go @@ -29,11 +29,18 @@ type EnvironmentVariables struct { InternalEnvVariables *InternalEnvVariables } +// CATEGORY=CD type DeploymentServiceTypeConfig struct { - ExternallyManagedDeploymentType bool `env:"IS_INTERNAL_USE" 
envDefault:"false"` - HelmInstallASyncMode bool `env:"RUN_HELM_INSTALL_IN_ASYNC_MODE_HELM_APPS" envDefault:"false"` - UseDeploymentConfigData bool `env:"USE_DEPLOYMENT_CONFIG_DATA" envDefault:"false"` - ShouldCheckNamespaceOnClone bool `env:"SHOULD_CHECK_NAMESPACE_ON_CLONE" envDefault:"false" description:"should we check if namespace exists or not while cloning app" deprecated:"false"` + ExternallyManagedDeploymentType bool `env:"IS_INTERNAL_USE" envDefault:"false"` + HelmInstallASyncMode bool `env:"RUN_HELM_INSTALL_IN_ASYNC_MODE_HELM_APPS" envDefault:"false"` + UseDeploymentConfigData bool `env:"USE_DEPLOYMENT_CONFIG_DATA" envDefault:"false" description:"use deployment config data from deployment_config table" deprecated:"true"` + MigrateDeploymentConfigData bool `env:"MIGRATE_DEPLOYMENT_CONFIG_DATA" envDefault:"false" description:"migrate deployment config data from charts table to deployment_config table" deprecated:"false"` + FeatureMigrateArgoCdApplicationEnable bool `env:"FEATURE_MIGRATE_ARGOCD_APPLICATION_ENABLE" envDefault:"false" description:"enable migration of external argocd application to devtron pipeline" deprecated:"false"` + ShouldCheckNamespaceOnClone bool `env:"SHOULD_CHECK_NAMESPACE_ON_CLONE" envDefault:"false" description:"should we check if namespace exists or not while cloning app" deprecated:"false"` +} + +func (d *DeploymentServiceTypeConfig) IsFeatureMigrateArgoCdApplicationEnable() bool { + return false } type GlobalEnvVariables struct { diff --git a/util/HttpUtil.go b/util/HttpUtil.go index d28e54688d..d5d5c91269 100644 --- a/util/HttpUtil.go +++ b/util/HttpUtil.go @@ -61,16 +61,16 @@ func ReadFromUrlWithRetry(url string) ([]byte, error) { return nil, err } -func GetHost(urlStr string) (string, error) { +func GetHost(urlStr string) (string, string, error) { u, err := url.Parse(urlStr) if err == nil { - return u.Host, nil + return u.Host, u.Scheme, nil } u, err = url.Parse("//" + urlStr) if err != nil { - return "", fmt.Errorf("invalid url: %w", err) + return "", "", fmt.Errorf("invalid url: %w", err) } - return u.Host, nil + return u.Host, u.Scheme, nil } func GetTlsConfig(TLSKey, TLSCert, CACert, folderPath string) (*tls.Config, error) { diff --git a/util/gitUtil/GitUtil.go b/util/gitUtil/GitUtil.go index 85b710aea1..dad6d54247 100644 --- a/util/gitUtil/GitUtil.go +++ b/util/gitUtil/GitUtil.go @@ -16,9 +16,16 @@ package gitUtil -import "strings" +import ( + "fmt" + "strings" +) func GetGitRepoNameFromGitRepoUrl(gitRepoUrl string) string { gitRepoUrl = gitRepoUrl[strings.LastIndex(gitRepoUrl, "/")+1:] return strings.TrimSuffix(gitRepoUrl, ".git") } + +func GetRefBranchHead(branch string) string { + return fmt.Sprintf("refs/heads/%s", branch) +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/common/common.go b/vendor/github.com/argoproj/argo-cd/v2/common/common.go index d7c2d24738..671eecf82b 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/common/common.go +++ b/vendor/github.com/argoproj/argo-cd/v2/common/common.go @@ -1,15 +1,20 @@ package common import ( - "errors" + "context" + "fmt" "os" "path/filepath" "strconv" "time" + "github.com/pkg/errors" + "github.com/redis/go-redis/v9" "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" ) // Component names @@ -113,11 +118,17 @@ const ( // LegacyShardingAlgorithm is the default 
+func GetRefBranchHead(branch string) string {
+	return fmt.Sprintf("refs/heads/%s", branch)
+}
diff --git a/vendor/github.com/argoproj/argo-cd/v2/common/common.go b/vendor/github.com/argoproj/argo-cd/v2/common/common.go
index d7c2d24738..671eecf82b 100644
--- a/vendor/github.com/argoproj/argo-cd/v2/common/common.go
+++ b/vendor/github.com/argoproj/argo-cd/v2/common/common.go
@@ -1,15 +1,20 @@
 package common

 import (
-	"errors"
+	"context"
+	"fmt"
 	"os"
 	"path/filepath"
 	"strconv"
 	"time"

+	"github.com/pkg/errors"
+	"github.com/redis/go-redis/v9"
 	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
 )

 // Component names
@@ -113,11 +118,17 @@ const (
 	// LegacyShardingAlgorithm is the default value for Sharding Algorithm it uses an `uid` based distribution (non-uniform)
 	LegacyShardingAlgorithm = "legacy"
-	// RoundRobinShardingAlgorithm is a flag value that can be opted for Sharding Algorithm it uses an equal distribution accross all shards
+	// RoundRobinShardingAlgorithm is a flag value that can be opted for Sharding Algorithm it uses an equal distribution across all shards
 	RoundRobinShardingAlgorithm = "round-robin"
-	DefaultShardingAlgorithm    = LegacyShardingAlgorithm
 	// AppControllerHeartbeatUpdateRetryCount is the retry count for updating the Shard Mapping to the Shard Mapping ConfigMap used by Application Controller
 	AppControllerHeartbeatUpdateRetryCount = 3
+
+	// ConsistentHashingWithBoundedLoadsAlgorithm uses an algorithm that tries to use an equal distribution across
+	// all shards but is optimised to handle sharding and/or cluster addition or removal. In case of sharding or
+	// cluster changes, this algorithm minimises the changes between shard and clusters assignments.
+	ConsistentHashingWithBoundedLoadsAlgorithm = "consistent-hashing"
+
+	DefaultShardingAlgorithm = LegacyShardingAlgorithm
 )

 // Dex related constants
@@ -149,10 +160,14 @@ const (
 	LabelKeyAppInstance = "app.kubernetes.io/instance"
 	// LabelKeyAppName is the label key to use to uniquely identify the name of the Kubernetes application
 	LabelKeyAppName = "app.kubernetes.io/name"
+	// LabelKeyAutoLabelClusterInfo if set to true will automatically add extra labels from the cluster info (currently it only adds a k8s version label)
+	LabelKeyAutoLabelClusterInfo = "argocd.argoproj.io/auto-label-cluster-info"
 	// LabelKeyLegacyApplicationName is the legacy label (v0.10 and below) and is superseded by 'app.kubernetes.io/instance'
 	LabelKeyLegacyApplicationName = "applications.argoproj.io/app-name"
 	// LabelKeySecretType contains the type of argocd secret (currently: 'cluster', 'repository', 'repo-config' or 'repo-creds')
 	LabelKeySecretType = "argocd.argoproj.io/secret-type"
+	// LabelKeyClusterKubernetesVersion contains the kubernetes version of the cluster secret if it has been enabled
+	LabelKeyClusterKubernetesVersion = "argocd.argoproj.io/kubernetes-version"
 	// LabelValueSecretTypeCluster indicates a secret type of cluster
 	LabelValueSecretTypeCluster = "cluster"
 	// LabelValueSecretTypeRepository indicates a secret type of repository
@@ -162,6 +177,7 @@ const (

 	// AnnotationKeyAppInstance is the Argo CD application name is used as the instance name
 	AnnotationKeyAppInstance = "argocd.argoproj.io/tracking-id"
+	AnnotationInstallationID = "argocd.argoproj.io/installation-id"

 	// AnnotationCompareOptions is a comma-separated list of options for comparison
 	AnnotationCompareOptions = "argocd.argoproj.io/compare-options"
@@ -184,6 +200,10 @@ const (
 	// AnnotationKeyAppSkipReconcile tells the Application to skip the Application controller reconcile.
 	// Skip reconcile when the value is "true" or any other string values that can be strconv.ParseBool() to be true.
 	AnnotationKeyAppSkipReconcile = "argocd.argoproj.io/skip-reconcile"
+	// LabelKeyComponentRepoServer is the label key to identify the component as repo-server
+	LabelKeyComponentRepoServer = "app.kubernetes.io/component"
+	// LabelValueComponentRepoServer is the label value for the repo-server component
+	LabelValueComponentRepoServer = "repo-server"
 )

 // Environment variables for tuning and debugging Argo CD
@@ -198,7 +218,7 @@ const (
 	EnvVarTLSDataPath = "ARGOCD_TLS_DATA_PATH"
 	// EnvGitAttemptsCount specifies number of git remote operations attempts count
 	EnvGitAttemptsCount = "ARGOCD_GIT_ATTEMPTS_COUNT"
-	// EnvGitRetryMaxDuration specifices max duration of git remote operation retry
+	// EnvGitRetryMaxDuration specifies max duration of git remote operation retry
 	EnvGitRetryMaxDuration = "ARGOCD_GIT_RETRY_MAX_DURATION"
 	// EnvGitRetryDuration specifies duration of git remote operation retry
 	EnvGitRetryDuration = "ARGOCD_GIT_RETRY_DURATION"
@@ -224,7 +244,7 @@ const (
 	EnvControllerShard = "ARGOCD_CONTROLLER_SHARD"
 	// EnvControllerShardingAlgorithm is the distribution sharding algorithm to be used: legacy or round-robin
 	EnvControllerShardingAlgorithm = "ARGOCD_CONTROLLER_SHARDING_ALGORITHM"
-	//EnvEnableDynamicClusterDistribution enables dynamic sharding (ALPHA)
+	// EnvEnableDynamicClusterDistribution enables dynamic sharding (ALPHA)
 	EnvEnableDynamicClusterDistribution = "ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION"
 	// EnvEnableGRPCTimeHistogramEnv enables gRPC metrics collection
 	EnvEnableGRPCTimeHistogramEnv = "ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM"
@@ -238,6 +258,8 @@ const (
 	EnvLogFormat = "ARGOCD_LOG_FORMAT"
 	// EnvLogLevel log level that is defined by `--loglevel` option
 	EnvLogLevel = "ARGOCD_LOG_LEVEL"
+	// EnvLogFormatEnableFullTimestamp enables the FullTimestamp option in logs
+	EnvLogFormatEnableFullTimestamp = "ARGOCD_LOG_FORMAT_ENABLE_FULL_TIMESTAMP"
 	// EnvMaxCookieNumber max number of chunks a cookie can be broken into
 	EnvMaxCookieNumber = "ARGOCD_MAX_COOKIE_NUMBER"
 	// EnvPluginSockFilePath allows to override the pluginSockFilePath for repo server and cmp server
@@ -258,6 +280,13 @@ const (
 	EnvRedisName = "ARGOCD_REDIS_NAME"
 	// EnvRedisHaProxyName is the name of the Argo CD Redis HA proxy component, as specified by the value under the LabelKeyAppName label key.
 	EnvRedisHaProxyName = "ARGOCD_REDIS_HAPROXY_NAME"
+	// EnvGRPCKeepAliveMin defines the GRPCKeepAliveEnforcementMinimum, used in the grpc.KeepaliveEnforcementPolicy. Expects a "Duration" format (e.g. 10s).
+	EnvGRPCKeepAliveMin = "ARGOCD_GRPC_KEEP_ALIVE_MIN"
+	// EnvServerSideDiff defines the env var used to enable ServerSide Diff feature.
+	// If defined, value must be "true" or "false".
+	EnvServerSideDiff = "ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF"
+	// EnvGRPCMaxSizeMB is the environment variable to look for a max GRPC message size
+	EnvGRPCMaxSizeMB = "ARGOCD_GRPC_MAX_SIZE_MB"
 )

 // Config Management Plugin related constants
@@ -336,7 +365,7 @@ func GetCMPChunkSize() int {
 }

 // GetCMPWorkDir will return the full path of the work directory used by the CMP server.
-// This directory and all it's contents will be deleted durring CMP bootstrap.
+// This directory and all it's contents will be deleted during CMP bootstrap.
 func GetCMPWorkDir() string {
 	if workDir := os.Getenv(EnvCMPWorkDir); workDir != "" {
 		return filepath.Join(workDir, DefaultCMPWorkDirName)
@@ -351,11 +380,26 @@ const (

 // gRPC settings
 const (
-	GRPCKeepAliveEnforcementMinimum = 10 * time.Second
-	// GRPCKeepAliveTime is 2x enforcement minimum to ensure network jitter does not introduce ENHANCE_YOUR_CALM errors
-	GRPCKeepAliveTime = 2 * GRPCKeepAliveEnforcementMinimum
+	defaultGRPCKeepAliveEnforcementMinimum = 10 * time.Second
 )

+func GetGRPCKeepAliveEnforcementMinimum() time.Duration {
+	if GRPCKeepAliveMinStr := os.Getenv(EnvGRPCKeepAliveMin); GRPCKeepAliveMinStr != "" {
+		GRPCKeepAliveMin, err := time.ParseDuration(GRPCKeepAliveMinStr)
+		if err != nil {
+			logrus.Warnf("invalid env var value for %s: cannot parse: %s. Default value %s will be used.", EnvGRPCKeepAliveMin, err, defaultGRPCKeepAliveEnforcementMinimum)
+			return defaultGRPCKeepAliveEnforcementMinimum
+		}
+		return GRPCKeepAliveMin
+	}
+	return defaultGRPCKeepAliveEnforcementMinimum
+}
+
+func GetGRPCKeepAliveTime() time.Duration {
+	// GRPCKeepAliveTime is 2x enforcement minimum to ensure network jitter does not introduce ENHANCE_YOUR_CALM errors
+	return 2 * GetGRPCKeepAliveEnforcementMinimum()
+}
+
 // Security severity logging
 const (
 	SecurityField = "security"
@@ -376,3 +420,30 @@ const TokenVerificationError = "failed to verify the token"
 var TokenVerificationErr = errors.New(TokenVerificationError)

 var PermissionDeniedAPIError = status.Error(codes.PermissionDenied, "permission denied")
+
+// Redis password consts
+const (
+	DefaultRedisInitialPasswordSecretName = "argocd-redis"
+	DefaultRedisInitialPasswordKey        = "auth"
+)
+
+/*
+SetOptionalRedisPasswordFromKubeConfig sets the optional Redis password if it exists in the k8s namespace's secrets.
+
+We specify kubeClient as kubernetes.Interface to allow for mocking in tests, but this should be treated as a kubernetes.Clientset param.
+*/ +func SetOptionalRedisPasswordFromKubeConfig(ctx context.Context, kubeClient kubernetes.Interface, namespace string, redisOptions *redis.Options) error { + secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, DefaultRedisInitialPasswordSecretName, v1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get secret %s/%s: %w", namespace, DefaultRedisInitialPasswordSecretName, err) + } + if secret == nil { + return fmt.Errorf("failed to get secret %s/%s: secret is nil", namespace, DefaultRedisInitialPasswordSecretName) + } + _, ok := secret.Data[DefaultRedisInitialPasswordKey] + if !ok { + return fmt.Errorf("secret %s/%s does not contain key %s", namespace, DefaultRedisInitialPasswordSecretName, DefaultRedisInitialPasswordKey) + } + redisOptions.Password = string(secret.Data[DefaultRedisInitialPasswordKey]) + return nil +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/application/application.pb.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/application/application.pb.go index 8fd016ee36..2f73469d10 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/application/application.pb.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/application/application.pb.go @@ -44,7 +44,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type ApplicationQuery struct { // the application's name Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // forces application reconciliation if set to true + // forces application reconciliation if set to 'hard' Refresh *string `protobuf:"bytes,2,opt,name=refresh" json:"refresh,omitempty"` // the project names to restrict returned list applications Projects []string `protobuf:"bytes,3,rep,name=projects" json:"projects,omitempty"` @@ -214,8 +214,12 @@ type RevisionMetadataQuery struct { // the revision of the app Revision *string `protobuf:"bytes,2,req,name=revision" json:"revision,omitempty"` // the application's namespace - AppNamespace *string `protobuf:"bytes,3,opt,name=appNamespace" json:"appNamespace,omitempty"` - Project *string `protobuf:"bytes,4,opt,name=project" json:"project,omitempty"` + AppNamespace *string `protobuf:"bytes,3,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,4,opt,name=project" json:"project,omitempty"` + // source index (for multi source apps) + SourceIndex *int32 `protobuf:"varint,5,opt,name=sourceIndex" json:"sourceIndex,omitempty"` + // versionId from historical data (for multi source apps) + VersionId *int32 `protobuf:"varint,6,opt,name=versionId" json:"versionId,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -282,6 +286,20 @@ func (m *RevisionMetadataQuery) GetProject() string { return "" } +func (m *RevisionMetadataQuery) GetSourceIndex() int32 { + if m != nil && m.SourceIndex != nil { + return *m.SourceIndex + } + return 0 +} + +func (m *RevisionMetadataQuery) GetVersionId() int32 { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return 0 +} + // ApplicationEventsQuery is a query for application resource events type ApplicationResourceEventsQuery struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` @@ -376,6 +394,8 @@ type ApplicationManifestQuery struct { Revision *string `protobuf:"bytes,2,opt,name=revision" json:"revision,omitempty"` AppNamespace *string 
`protobuf:"bytes,3,opt,name=appNamespace" json:"appNamespace,omitempty"` Project *string `protobuf:"bytes,4,opt,name=project" json:"project,omitempty"` + SourcePositions []int64 `protobuf:"varint,5,rep,name=sourcePositions" json:"sourcePositions,omitempty"` + Revisions []string `protobuf:"bytes,6,rep,name=revisions" json:"revisions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -442,6 +462,20 @@ func (m *ApplicationManifestQuery) GetProject() string { return "" } +func (m *ApplicationManifestQuery) GetSourcePositions() []int64 { + if m != nil { + return m.SourcePositions + } + return nil +} + +func (m *ApplicationManifestQuery) GetRevisions() []string { + if m != nil { + return m.Revisions + } + return nil +} + type FileChunk struct { Chunk []byte `protobuf:"bytes,1,req,name=chunk" json:"chunk,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -957,6 +991,8 @@ type ApplicationSyncRequest struct { SyncOptions *SyncOptions `protobuf:"bytes,11,opt,name=syncOptions" json:"syncOptions,omitempty"` AppNamespace *string `protobuf:"bytes,12,opt,name=appNamespace" json:"appNamespace,omitempty"` Project *string `protobuf:"bytes,13,opt,name=project" json:"project,omitempty"` + SourcePositions []int64 `protobuf:"varint,14,rep,name=sourcePositions" json:"sourcePositions,omitempty"` + Revisions []string `protobuf:"bytes,15,rep,name=revisions" json:"revisions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1079,6 +1115,20 @@ func (m *ApplicationSyncRequest) GetProject() string { return "" } +func (m *ApplicationSyncRequest) GetSourcePositions() []int64 { + if m != nil { + return m.SourcePositions + } + return nil +} + +func (m *ApplicationSyncRequest) GetRevisions() []string { + if m != nil { + return m.Revisions + } + return nil +} + // ApplicationUpdateSpecRequest is a request to update application spec type ApplicationUpdateSpecRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` @@ -2792,175 +2842,179 @@ func init() { } var fileDescriptor_df6e82b174b5eaec = []byte{ - // 2673 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x8f, 0x1c, 0x47, - 0x15, 0xa7, 0x66, 0xbf, 0x66, 0xde, 0xec, 0xfa, 0xa3, 0x12, 0x2f, 0x9d, 0xf6, 0xc6, 0x6c, 0xda, - 0x76, 0xbc, 0x59, 0x7b, 0x67, 0xec, 0xc1, 0x20, 0x67, 0x93, 0x08, 0xec, 0xf5, 0x27, 0xac, 0x1d, - 0xd3, 0x6b, 0x63, 0x14, 0x0e, 0x50, 0xe9, 0xae, 0x9d, 0x6d, 0xb6, 0xa7, 0xbb, 0xdd, 0xdd, 0x33, - 0xd6, 0xca, 0xf8, 0x12, 0x64, 0x09, 0xa1, 0x08, 0x04, 0xe4, 0x80, 0x10, 0x02, 0x14, 0x14, 0x09, - 0x21, 0x10, 0x17, 0x14, 0x21, 0x21, 0x24, 0xb8, 0x20, 0x38, 0x20, 0x21, 0x38, 0x72, 0x41, 0x16, - 0xe2, 0x08, 0x97, 0xfc, 0x01, 0xa8, 0xaa, 0xab, 0xba, 0xab, 0xe7, 0xa3, 0x67, 0x96, 0x19, 0x14, - 0xdf, 0xfa, 0xd5, 0x54, 0xbd, 0xf7, 0xab, 0x57, 0xbf, 0x7a, 0xaf, 0xea, 0xd5, 0xc0, 0x89, 0x88, - 0x86, 0x1d, 0x1a, 0xd6, 0x49, 0x10, 0xb8, 0x8e, 0x45, 0x62, 0xc7, 0xf7, 0xd4, 0xef, 0x5a, 0x10, - 0xfa, 0xb1, 0x8f, 0xab, 0x4a, 0x93, 0xbe, 0xd4, 0xf4, 0xfd, 0xa6, 0x4b, 0xeb, 0x24, 0x70, 0xea, - 0xc4, 0xf3, 0xfc, 0x98, 0x37, 0x47, 0x49, 0x57, 0xdd, 0xd8, 0xbd, 0x10, 0xd5, 0x1c, 0x9f, 0xff, - 0x6a, 0xf9, 0x21, 0xad, 0x77, 0xce, 0xd5, 0x9b, 0xd4, 0xa3, 0x21, 0x89, 0xa9, 0x2d, 0xfa, 0x9c, - 0xcf, 0xfa, 0xb4, 0x88, 0xb5, 0xe3, 0x78, 0x34, 0xdc, 0xab, 0x07, 0xbb, 0x4d, 0xd6, 0x10, 0xd5, - 0x5b, 0x34, 0x26, 0xfd, 0x46, 0x6d, 0x36, 0x9d, 
0x78, 0xa7, 0xfd, 0x66, 0xcd, 0xf2, 0x5b, 0x75, - 0x12, 0x36, 0xfd, 0x20, 0xf4, 0xbf, 0xc2, 0x3f, 0xd6, 0x2c, 0xbb, 0xde, 0x69, 0x64, 0x0a, 0xd4, - 0xb9, 0x74, 0xce, 0x11, 0x37, 0xd8, 0x21, 0xbd, 0xda, 0xae, 0x0c, 0xd1, 0x16, 0xd2, 0xc0, 0x17, - 0xbe, 0xe1, 0x9f, 0x4e, 0xec, 0x87, 0x7b, 0xca, 0x67, 0xa2, 0xc6, 0xf8, 0x00, 0xc1, 0xa1, 0x8b, - 0x99, 0xbd, 0xcf, 0xb5, 0x69, 0xb8, 0x87, 0x31, 0x4c, 0x7b, 0xa4, 0x45, 0x35, 0xb4, 0x8c, 0x56, - 0x2a, 0x26, 0xff, 0xc6, 0x1a, 0xcc, 0x85, 0x74, 0x3b, 0xa4, 0xd1, 0x8e, 0x56, 0xe2, 0xcd, 0x52, - 0xc4, 0x3a, 0x94, 0x99, 0x71, 0x6a, 0xc5, 0x91, 0x36, 0xb5, 0x3c, 0xb5, 0x52, 0x31, 0x53, 0x19, - 0xaf, 0xc0, 0xc1, 0x90, 0x46, 0x7e, 0x3b, 0xb4, 0xe8, 0xe7, 0x69, 0x18, 0x39, 0xbe, 0xa7, 0x4d, - 0xf3, 0xd1, 0xdd, 0xcd, 0x4c, 0x4b, 0x44, 0x5d, 0x6a, 0xc5, 0x7e, 0xa8, 0xcd, 0xf0, 0x2e, 0xa9, - 0xcc, 0xf0, 0x30, 0xe0, 0xda, 0x6c, 0x82, 0x87, 0x7d, 0x63, 0x03, 0xe6, 0x49, 0x10, 0xdc, 0x22, - 0x2d, 0x1a, 0x05, 0xc4, 0xa2, 0xda, 0x1c, 0xff, 0x2d, 0xd7, 0xc6, 0x30, 0x0b, 0x24, 0x5a, 0x99, - 0x03, 0x93, 0xa2, 0xb1, 0x01, 0x95, 0x5b, 0xbe, 0x4d, 0x07, 0x4f, 0xb7, 0x5b, 0x7d, 0xa9, 0x57, - 0xbd, 0xf1, 0x18, 0xc1, 0x11, 0x93, 0x76, 0x1c, 0x86, 0xff, 0x26, 0x8d, 0x89, 0x4d, 0x62, 0xd2, - 0xad, 0xb1, 0x94, 0x6a, 0xd4, 0xa1, 0x1c, 0x8a, 0xce, 0x5a, 0x89, 0xb7, 0xa7, 0x72, 0x8f, 0xb5, - 0xa9, 0xe2, 0xc9, 0x24, 0x2e, 0x4c, 0x27, 0xf3, 0x2f, 0x04, 0xc7, 0x94, 0x35, 0x34, 0x85, 0x67, - 0xaf, 0x74, 0xa8, 0x17, 0x47, 0x83, 0x01, 0x9d, 0x81, 0xc3, 0x72, 0x11, 0xba, 0xe7, 0xd9, 0xfb, - 0x03, 0x83, 0xa8, 0x36, 0x4a, 0x88, 0x6a, 0x1b, 0x5e, 0x86, 0xaa, 0x94, 0xef, 0xde, 0xb8, 0x2c, - 0x60, 0xaa, 0x4d, 0x3d, 0x13, 0x9d, 0x29, 0x9e, 0xe8, 0x6c, 0x7e, 0xa2, 0x5f, 0x47, 0xa0, 0x29, - 0x13, 0xbd, 0x49, 0x3c, 0x67, 0x9b, 0x46, 0xf1, 0xa8, 0x3e, 0x47, 0x13, 0xf4, 0xf9, 0x0b, 0x50, - 0xb9, 0xea, 0xb8, 0x74, 0x63, 0xa7, 0xed, 0xed, 0xe2, 0x67, 0x61, 0xc6, 0x62, 0x1f, 0xdc, 0xf6, - 0xbc, 0x99, 0x08, 0xc6, 0xb7, 0x11, 0xbc, 0x30, 0x08, 0xed, 0x3d, 0x27, 0xde, 0x61, 0xe3, 0xa3, - 0x41, 0xb0, 0xad, 0x1d, 0x6a, 0xed, 0x46, 0xed, 0x96, 0xa4, 0x8a, 0x94, 0xc7, 0x84, 0xfd, 0x33, - 0x04, 0x2b, 0x43, 0x31, 0xdd, 0x0b, 0x49, 0x10, 0xd0, 0x10, 0x5f, 0x85, 0x99, 0xfb, 0xec, 0x07, - 0xbe, 0x31, 0xaa, 0x8d, 0x5a, 0x4d, 0x0d, 0xac, 0x43, 0xb5, 0x5c, 0xff, 0x88, 0x99, 0x0c, 0xc7, - 0x35, 0xe9, 0x9e, 0x12, 0xd7, 0xb3, 0x98, 0xd3, 0x93, 0x7a, 0x91, 0xf5, 0xe7, 0xdd, 0x2e, 0xcd, - 0xc2, 0x74, 0x40, 0xc2, 0xd8, 0x38, 0x02, 0xcf, 0xe4, 0x69, 0x1d, 0xf8, 0x5e, 0x44, 0x8d, 0xdf, - 0xe4, 0x59, 0xb0, 0x11, 0x52, 0x12, 0x53, 0x93, 0xde, 0x6f, 0xd3, 0x28, 0xc6, 0xbb, 0xa0, 0xc6, - 0x7a, 0xee, 0xd5, 0x6a, 0xe3, 0x46, 0x2d, 0x0b, 0x96, 0x35, 0x19, 0x2c, 0xf9, 0xc7, 0x97, 0x2c, - 0xbb, 0xd6, 0x69, 0xd4, 0x82, 0xdd, 0x66, 0x8d, 0x85, 0xde, 0x1c, 0x32, 0x19, 0x7a, 0xd5, 0xa9, - 0x9a, 0xaa, 0x76, 0xbc, 0x08, 0xb3, 0xed, 0x20, 0xa2, 0x61, 0xcc, 0x67, 0x56, 0x36, 0x85, 0xc4, - 0xd6, 0xaf, 0x43, 0x5c, 0xc7, 0x26, 0x71, 0xb2, 0x3e, 0x65, 0x33, 0x95, 0x8d, 0xdf, 0xe6, 0xd1, - 0xdf, 0x0d, 0xec, 0x0f, 0x0b, 0xbd, 0x8a, 0xb2, 0x94, 0x47, 0xa9, 0x32, 0x68, 0x2a, 0xcf, 0xa0, - 0x5f, 0xe5, 0xf1, 0x5f, 0xa6, 0x2e, 0xcd, 0xf0, 0xf7, 0x23, 0xb3, 0x06, 0x73, 0x16, 0x89, 0x2c, - 0x62, 0x4b, 0x2b, 0x52, 0x64, 0x01, 0x28, 0x08, 0xfd, 0x80, 0x34, 0xb9, 0xa6, 0xdb, 0xbe, 0xeb, - 0x58, 0x7b, 0xc2, 0x5c, 0xef, 0x0f, 0x3d, 0xc4, 0x9f, 0x2e, 0x26, 0xfe, 0x4c, 0x1e, 0xf6, 0x71, - 0xa8, 0x6e, 0xed, 0x79, 0xd6, 0xeb, 0x01, 0xcf, 0xf5, 0x6c, 0xc7, 0x3a, 0x31, 0x6d, 0x45, 0x1a, - 0xe2, 0x79, 0x21, 0x11, 0x8c, 0xf7, 0x67, 0x60, 0x51, 0x99, 0x1b, 0x1b, 
0x50, 0x34, 0xb3, 0xa2, - 0xe8, 0xb2, 0x08, 0xb3, 0x76, 0xb8, 0x67, 0xb6, 0x3d, 0x41, 0x00, 0x21, 0x31, 0xc3, 0x41, 0xd8, - 0xf6, 0x12, 0xf8, 0x65, 0x33, 0x11, 0xf0, 0x36, 0x94, 0xa3, 0x98, 0x65, 0xf7, 0xe6, 0x1e, 0x07, - 0x5e, 0x6d, 0x7c, 0x66, 0xbc, 0x45, 0x67, 0xd0, 0xb7, 0x84, 0x46, 0x33, 0xd5, 0x8d, 0xef, 0x43, - 0x45, 0x46, 0xe3, 0x48, 0x9b, 0x5b, 0x9e, 0x5a, 0xa9, 0x36, 0xb6, 0xc6, 0x37, 0xf4, 0x7a, 0xc0, - 0x4e, 0x26, 0x4a, 0xe6, 0x31, 0x33, 0x2b, 0x78, 0x09, 0x2a, 0x2d, 0x11, 0x1f, 0x22, 0x91, 0x85, - 0xb3, 0x06, 0xfc, 0x05, 0x98, 0x71, 0xbc, 0x6d, 0x3f, 0xd2, 0x2a, 0x1c, 0xcc, 0xa5, 0xf1, 0xc0, - 0xdc, 0xf0, 0xb6, 0x7d, 0x33, 0x51, 0x88, 0xef, 0xc3, 0x42, 0x48, 0xe3, 0x70, 0x4f, 0x7a, 0x41, - 0x03, 0xee, 0xd7, 0xcf, 0x8e, 0x67, 0xc1, 0x54, 0x55, 0x9a, 0x79, 0x0b, 0x78, 0x1d, 0xaa, 0x51, - 0xc6, 0x31, 0xad, 0xca, 0x0d, 0x6a, 0x39, 0x45, 0x0a, 0x07, 0x4d, 0xb5, 0x73, 0x0f, 0xbb, 0xe7, - 0x8b, 0xd9, 0xbd, 0x90, 0x67, 0xf7, 0x7f, 0x10, 0x2c, 0xf5, 0x04, 0x95, 0xad, 0x80, 0x16, 0xd2, - 0x97, 0xc0, 0x74, 0x14, 0x50, 0x8b, 0x67, 0x98, 0x6a, 0xe3, 0xe6, 0xc4, 0xa2, 0x0c, 0xb7, 0xcb, - 0x55, 0x17, 0x05, 0xc2, 0x31, 0xf7, 0xf3, 0x8f, 0x10, 0x7c, 0x54, 0xb1, 0x79, 0x9b, 0xc4, 0xd6, - 0x4e, 0xd1, 0x64, 0xd9, 0xbe, 0x63, 0x7d, 0x44, 0x3e, 0x4d, 0x04, 0x46, 0x4e, 0xfe, 0x71, 0x67, - 0x2f, 0x60, 0x00, 0xd9, 0x2f, 0x59, 0xc3, 0x98, 0x87, 0x95, 0x9f, 0x23, 0xd0, 0xd5, 0xd8, 0xeb, - 0xbb, 0xee, 0x9b, 0xc4, 0xda, 0x2d, 0x02, 0x79, 0x00, 0x4a, 0x8e, 0xcd, 0x11, 0x4e, 0x99, 0x25, - 0xc7, 0xde, 0x67, 0x10, 0xe9, 0x86, 0x3b, 0x5b, 0x0c, 0x77, 0x2e, 0x0f, 0xf7, 0x83, 0x2e, 0xb8, - 0x72, 0x2b, 0x17, 0xc0, 0x5d, 0x82, 0x8a, 0xd7, 0x75, 0x70, 0xcc, 0x1a, 0xfa, 0x1c, 0x18, 0x4b, - 0x3d, 0x07, 0x46, 0x0d, 0xe6, 0x3a, 0xe9, 0xb5, 0x80, 0xfd, 0x2c, 0x45, 0x36, 0xc5, 0x66, 0xe8, - 0xb7, 0x03, 0xe1, 0xf4, 0x44, 0x60, 0x28, 0x76, 0x1d, 0xcf, 0xd6, 0x66, 0x13, 0x14, 0xec, 0x7b, - 0xff, 0x17, 0x81, 0xdc, 0xb4, 0x7f, 0x51, 0x82, 0x8f, 0xf5, 0x99, 0xf6, 0x50, 0x3e, 0x3d, 0x1d, - 0x73, 0x4f, 0x59, 0x3d, 0x37, 0x90, 0xd5, 0xe5, 0x61, 0xac, 0xae, 0x14, 0xfb, 0x0b, 0xf2, 0xfe, - 0xfa, 0x69, 0x09, 0x96, 0xfb, 0xf8, 0x6b, 0xf8, 0x31, 0xe0, 0xa9, 0x71, 0xd8, 0xb6, 0x1f, 0x0a, - 0x96, 0x94, 0xcd, 0x44, 0x60, 0xfb, 0xcc, 0x0f, 0x83, 0x1d, 0xe2, 0x71, 0x76, 0x94, 0x4d, 0x21, - 0x8d, 0xe9, 0xaa, 0x6f, 0x94, 0x40, 0x93, 0xfe, 0xb9, 0x68, 0x71, 0x6f, 0xb5, 0xbd, 0xa7, 0xdf, - 0x45, 0x8b, 0x30, 0x4b, 0x38, 0x5a, 0x41, 0x2a, 0x21, 0xf5, 0x38, 0xa3, 0x5c, 0xec, 0x8c, 0x4a, - 0xde, 0x19, 0x8f, 0x11, 0x1c, 0xcd, 0x3b, 0x23, 0xda, 0x74, 0xa2, 0x58, 0x1e, 0xea, 0xf1, 0x36, - 0xcc, 0x25, 0x76, 0x92, 0x23, 0x59, 0xb5, 0xb1, 0x39, 0x6e, 0xa2, 0xce, 0x39, 0x5e, 0x2a, 0x37, - 0x5e, 0x86, 0xa3, 0x7d, 0xa3, 0x9c, 0x80, 0xa1, 0x43, 0x59, 0x1e, 0x4e, 0xc4, 0xd2, 0xa4, 0xb2, - 0xf1, 0x78, 0x3a, 0x9f, 0x72, 0x7c, 0x7b, 0xd3, 0x6f, 0x16, 0xdc, 0xaf, 0x8b, 0x97, 0x93, 0xb9, - 0xca, 0xb7, 0x95, 0xab, 0xb4, 0x14, 0xd9, 0x38, 0xcb, 0xf7, 0x62, 0xe2, 0x78, 0x34, 0x14, 0x59, - 0x31, 0x6b, 0x60, 0xcb, 0x10, 0x39, 0x9e, 0x45, 0xb7, 0xa8, 0xe5, 0x7b, 0x76, 0xc4, 0xd7, 0x73, - 0xca, 0xcc, 0xb5, 0xe1, 0xeb, 0x50, 0xe1, 0xf2, 0x1d, 0xa7, 0x95, 0xa4, 0x81, 0x6a, 0x63, 0xb5, - 0x96, 0xd4, 0xac, 0x6a, 0x6a, 0xcd, 0x2a, 0xf3, 0x61, 0x8b, 0xc6, 0xa4, 0xd6, 0x39, 0x57, 0x63, - 0x23, 0xcc, 0x6c, 0x30, 0xc3, 0x12, 0x13, 0xc7, 0xdd, 0x74, 0x3c, 0x7e, 0x60, 0x64, 0xa6, 0xb2, - 0x06, 0x46, 0x95, 0x6d, 0xdf, 0x75, 0xfd, 0x07, 0x72, 0xdf, 0x24, 0x12, 0x1b, 0xd5, 0xf6, 0x62, - 0xc7, 0xe5, 0xf6, 0x13, 0x22, 0x64, 0x0d, 0x7c, 0x94, 0xe3, 0xc6, 0x34, 0x14, 0x1b, 0x46, 0x48, - 
0x29, 0x19, 0xab, 0x49, 0x19, 0x46, 0xee, 0xd7, 0x84, 0xb6, 0xf3, 0x2a, 0x6d, 0xbb, 0xb7, 0xc2, - 0x42, 0x9f, 0x5a, 0x04, 0xaf, 0x4a, 0xd1, 0x8e, 0xe3, 0xb7, 0x23, 0xed, 0x40, 0x72, 0xf4, 0x90, - 0x72, 0x0f, 0x95, 0x0f, 0x16, 0x53, 0xf9, 0x50, 0x9e, 0xca, 0xbf, 0x43, 0x50, 0xde, 0xf4, 0x9b, - 0x57, 0xbc, 0x38, 0xdc, 0xe3, 0xb7, 0x1b, 0xdf, 0x8b, 0xa9, 0x27, 0xf9, 0x22, 0x45, 0xb6, 0x08, - 0xb1, 0xd3, 0xa2, 0x5b, 0x31, 0x69, 0x05, 0xe2, 0x8c, 0xb5, 0xaf, 0x45, 0x48, 0x07, 0x33, 0xc7, - 0xb8, 0x24, 0x8a, 0xf9, 0x8e, 0x2f, 0x9b, 0xfc, 0x9b, 0x4d, 0x21, 0xed, 0xb0, 0x15, 0x87, 0x62, - 0xbb, 0xe7, 0xda, 0x54, 0x8a, 0xcd, 0x24, 0xd8, 0x84, 0x68, 0xb4, 0xe0, 0xb9, 0xf4, 0xd0, 0x7e, - 0x87, 0x86, 0x2d, 0xc7, 0x23, 0xc5, 0xd1, 0x7b, 0x84, 0x72, 0x58, 0xc1, 0x9d, 0xd1, 0xcf, 0x6d, - 0x3a, 0x76, 0x06, 0xbe, 0xe7, 0x78, 0xb6, 0xff, 0xa0, 0x60, 0xf3, 0x8c, 0x67, 0xf0, 0xaf, 0xf9, - 0x8a, 0x98, 0x62, 0x31, 0xdd, 0xe9, 0xd7, 0x61, 0x81, 0xc5, 0x84, 0x0e, 0x15, 0x3f, 0x88, 0xb0, - 0x63, 0x0c, 0x2a, 0x72, 0x64, 0x3a, 0xcc, 0xfc, 0x40, 0xbc, 0x09, 0x07, 0x49, 0x14, 0x39, 0x4d, - 0x8f, 0xda, 0x52, 0x57, 0x69, 0x64, 0x5d, 0xdd, 0x43, 0x93, 0xeb, 0x32, 0xef, 0x21, 0xd6, 0x5b, - 0x8a, 0xc6, 0xd7, 0x10, 0x1c, 0xe9, 0xab, 0x24, 0xdd, 0x39, 0x48, 0x09, 0xe3, 0x3a, 0x94, 0x23, - 0x6b, 0x87, 0xda, 0x6d, 0x97, 0xca, 0x1a, 0x92, 0x94, 0xd9, 0x6f, 0x76, 0x3b, 0x59, 0x7d, 0x91, - 0x46, 0x52, 0x19, 0x1f, 0x03, 0x68, 0x11, 0xaf, 0x4d, 0x5c, 0x0e, 0x61, 0x9a, 0x43, 0x50, 0x5a, - 0x8c, 0x25, 0xd0, 0xfb, 0x51, 0x47, 0xd4, 0x66, 0xfe, 0x8d, 0xe0, 0x80, 0x0c, 0xaa, 0x62, 0x75, - 0x57, 0xe0, 0xa0, 0xe2, 0x86, 0x5b, 0xd9, 0x42, 0x77, 0x37, 0x0f, 0x09, 0x98, 0x92, 0x25, 0x53, - 0xf9, 0xa2, 0x74, 0x27, 0x57, 0x56, 0x1e, 0x39, 0xdf, 0xa1, 0x09, 0x9d, 0x1f, 0xbf, 0x0a, 0xda, - 0x4d, 0xe2, 0x91, 0x26, 0xb5, 0xd3, 0x69, 0xa7, 0x14, 0xfb, 0xb2, 0x5a, 0x64, 0x18, 0xfb, 0x4a, - 0x9f, 0x1e, 0xb5, 0x9c, 0xed, 0x6d, 0x59, 0xb0, 0x08, 0xa1, 0xbc, 0xe9, 0x78, 0xbb, 0xec, 0xde, - 0xcb, 0x66, 0x1c, 0x3b, 0xb1, 0x2b, 0xbd, 0x9b, 0x08, 0xf8, 0x10, 0x4c, 0xb5, 0x43, 0x57, 0x30, - 0x80, 0x7d, 0xe2, 0x65, 0xa8, 0xda, 0x34, 0xb2, 0x42, 0x27, 0x10, 0xeb, 0xcf, 0x8b, 0xb4, 0x4a, - 0x13, 0x5b, 0x07, 0xc7, 0xf2, 0xbd, 0x0d, 0x97, 0x44, 0x91, 0x4c, 0x40, 0x69, 0x83, 0xf1, 0x2a, - 0x2c, 0x30, 0x9b, 0xd9, 0x34, 0x4f, 0xe7, 0xa7, 0x79, 0x24, 0x07, 0x5f, 0xc2, 0x93, 0x88, 0x09, - 0x3c, 0xc3, 0xf2, 0xfe, 0xc5, 0x20, 0x10, 0x4a, 0x46, 0x3c, 0x0e, 0x4d, 0xf5, 0xcb, 0x9f, 0x7d, - 0x6b, 0x9c, 0x8d, 0xbf, 0x1f, 0x07, 0xac, 0xee, 0x13, 0x1a, 0x76, 0x1c, 0x8b, 0xe2, 0xef, 0x20, - 0x98, 0x66, 0xa6, 0xf1, 0xf3, 0x83, 0xb6, 0x25, 0xe7, 0xab, 0x3e, 0xb9, 0x8b, 0x30, 0xb3, 0x66, - 0x2c, 0xbd, 0xf5, 0xb7, 0x7f, 0x7e, 0xb7, 0xb4, 0x88, 0x9f, 0xe5, 0x2f, 0x4a, 0x9d, 0x73, 0xea, - 0xeb, 0x4e, 0x84, 0xdf, 0x46, 0x80, 0xc5, 0x39, 0x48, 0xa9, 0xd9, 0xe3, 0xd3, 0x83, 0x20, 0xf6, - 0xa9, 0xed, 0xeb, 0xcf, 0x2b, 0x59, 0xa5, 0x66, 0xf9, 0x21, 0x65, 0x39, 0x84, 0x77, 0xe0, 0x00, - 0x56, 0x39, 0x80, 0x13, 0xd8, 0xe8, 0x07, 0xa0, 0xfe, 0x90, 0x79, 0xf4, 0x51, 0x9d, 0x26, 0x76, - 0xdf, 0x45, 0x30, 0x73, 0x8f, 0xdf, 0x21, 0x86, 0x38, 0x69, 0x6b, 0x62, 0x4e, 0xe2, 0xe6, 0x38, - 0x5a, 0xe3, 0x38, 0x47, 0xfa, 0x3c, 0x3e, 0x2a, 0x91, 0x46, 0x71, 0x48, 0x49, 0x2b, 0x07, 0xf8, - 0x2c, 0xc2, 0xef, 0x21, 0x98, 0x4d, 0x8a, 0xbe, 0xf8, 0xe4, 0x20, 0x94, 0xb9, 0xa2, 0xb0, 0x3e, - 0xb9, 0x0a, 0xaa, 0xf1, 0x12, 0xc7, 0x78, 0xdc, 0xe8, 0xbb, 0x9c, 0xeb, 0xb9, 0xfa, 0xea, 0x3b, - 0x08, 0xa6, 0xae, 0xd1, 0xa1, 0x7c, 0x9b, 0x20, 0xb8, 0x1e, 0x07, 0xf6, 0x59, 0x6a, 0xfc, 0x13, - 0x04, 0xcf, 0x5d, 0xa3, 
0x71, 0xff, 0xf4, 0x88, 0x57, 0x86, 0xe7, 0x2c, 0x41, 0xbb, 0xd3, 0x23, - 0xf4, 0x4c, 0xf3, 0x42, 0x9d, 0x23, 0x7b, 0x09, 0x9f, 0x2a, 0x22, 0x61, 0xb4, 0xe7, 0x59, 0x0f, - 0x04, 0x8e, 0x3f, 0x21, 0x38, 0xd4, 0xfd, 0xb6, 0x86, 0xf3, 0x09, 0xb5, 0xef, 0xd3, 0x9b, 0x7e, - 0x6b, 0xdc, 0x28, 0x9b, 0x57, 0x6a, 0x5c, 0xe4, 0xc8, 0x5f, 0xc1, 0x2f, 0x17, 0x21, 0x97, 0x65, - 0xdf, 0xa8, 0xfe, 0x50, 0x7e, 0x3e, 0xe2, 0xef, 0xc0, 0x1c, 0xf6, 0x9f, 0x11, 0x3c, 0x2b, 0xf5, - 0x6e, 0xec, 0x90, 0x30, 0xbe, 0x4c, 0xd9, 0x19, 0x3a, 0x1a, 0x69, 0x3e, 0x63, 0x66, 0x0d, 0xd5, - 0x9e, 0x71, 0x85, 0xcf, 0xe5, 0x53, 0xf8, 0xb5, 0x7d, 0xcf, 0xc5, 0x62, 0x6a, 0x6c, 0x01, 0xfb, - 0x2d, 0x04, 0xf3, 0xd7, 0x68, 0x7c, 0x33, 0xad, 0xe2, 0x9e, 0x1c, 0xe9, 0x65, 0x48, 0x5f, 0xaa, - 0x29, 0xcf, 0xcf, 0xf2, 0xa7, 0x94, 0x22, 0x6b, 0x1c, 0xdc, 0x29, 0x7c, 0xb2, 0x08, 0x5c, 0x56, - 0x39, 0x7e, 0x17, 0xc1, 0x11, 0x15, 0x44, 0xf6, 0xa2, 0xf6, 0x89, 0xfd, 0xbd, 0x53, 0x89, 0xd7, - 0xae, 0x21, 0xe8, 0x1a, 0x1c, 0xdd, 0x19, 0xa3, 0x3f, 0x81, 0x5b, 0x3d, 0x28, 0xd6, 0xd1, 0xea, - 0x0a, 0xc2, 0xbf, 0x47, 0x30, 0x9b, 0x14, 0x63, 0x07, 0xfb, 0x28, 0xf7, 0x02, 0x34, 0xc9, 0x68, - 0x20, 0x56, 0x5b, 0x3f, 0xdb, 0xdf, 0xa1, 0xea, 0x78, 0x49, 0xd5, 0x1a, 0xf7, 0x72, 0x3e, 0x8c, - 0xbd, 0x8f, 0x00, 0xb2, 0x82, 0x32, 0x7e, 0xa9, 0x78, 0x1e, 0x4a, 0xd1, 0x59, 0x9f, 0x6c, 0x49, - 0xd9, 0xa8, 0xf1, 0xf9, 0xac, 0xe8, 0xcb, 0x85, 0x31, 0x24, 0xa0, 0xd6, 0x7a, 0x52, 0x7c, 0xfe, - 0x31, 0x82, 0x19, 0x5e, 0xc7, 0xc3, 0x27, 0x06, 0x61, 0x56, 0xcb, 0x7c, 0x93, 0x74, 0xfd, 0x8b, - 0x1c, 0xea, 0x72, 0xa3, 0x28, 0x10, 0xaf, 0xa3, 0x55, 0xdc, 0x81, 0xd9, 0xa4, 0x72, 0x36, 0x98, - 0x1e, 0xb9, 0xca, 0x9a, 0xbe, 0x5c, 0x70, 0x30, 0x48, 0x88, 0x2a, 0x72, 0xc0, 0xea, 0xb0, 0x1c, - 0x30, 0xcd, 0xc2, 0x34, 0x3e, 0x5e, 0x14, 0xc4, 0xff, 0x0f, 0x8e, 0x39, 0xcd, 0xd1, 0x9d, 0x34, - 0x96, 0x87, 0xe5, 0x01, 0xe6, 0x9d, 0xef, 0x21, 0x38, 0xd4, 0x7d, 0xb8, 0xc6, 0x47, 0xbb, 0x62, - 0xa6, 0x7a, 0xd7, 0xd0, 0xf3, 0x5e, 0x1c, 0x74, 0x30, 0x37, 0x3e, 0xcd, 0x51, 0xac, 0xe3, 0x0b, - 0x43, 0x77, 0xc6, 0x2d, 0x19, 0x75, 0x98, 0xa2, 0xb5, 0xec, 0x55, 0xeb, 0xd7, 0x08, 0xe6, 0xa5, - 0xde, 0x3b, 0x21, 0xa5, 0xc5, 0xb0, 0x26, 0xb7, 0x11, 0x98, 0x2d, 0xe3, 0x55, 0x0e, 0xff, 0x93, - 0xf8, 0xfc, 0x88, 0xf0, 0x25, 0xec, 0xb5, 0x98, 0x21, 0xfd, 0x03, 0x82, 0xc3, 0xf7, 0x12, 0xde, - 0x7f, 0x48, 0xf8, 0x37, 0x38, 0xfe, 0xd7, 0xf0, 0x2b, 0x05, 0xe7, 0xbc, 0x61, 0xd3, 0x38, 0x8b, - 0xf0, 0x2f, 0x11, 0x94, 0xe5, 0xab, 0x0a, 0x3e, 0x35, 0x70, 0x63, 0xe4, 0xdf, 0x5d, 0x26, 0x49, - 0x66, 0x71, 0xa8, 0x31, 0x4e, 0x14, 0xa6, 0x53, 0x61, 0x9f, 0x11, 0xfa, 0x1d, 0x04, 0x38, 0xbd, - 0x33, 0xa7, 0xb7, 0x68, 0xfc, 0x62, 0xce, 0xd4, 0xc0, 0xc2, 0x8c, 0x7e, 0x6a, 0x68, 0xbf, 0x7c, - 0x2a, 0x5d, 0x2d, 0x4c, 0xa5, 0x7e, 0x6a, 0xff, 0x9b, 0x08, 0xaa, 0xd7, 0x68, 0x7a, 0x07, 0x29, - 0xf0, 0x65, 0xfe, 0x51, 0x48, 0x5f, 0x19, 0xde, 0x51, 0x20, 0x3a, 0xc3, 0x11, 0xbd, 0x88, 0x8b, - 0x5d, 0x25, 0x01, 0xfc, 0x00, 0xc1, 0xc2, 0x6d, 0x95, 0xa2, 0xf8, 0xcc, 0x30, 0x4b, 0xb9, 0x48, - 0x3e, 0x3a, 0xae, 0x8f, 0x73, 0x5c, 0x6b, 0xc6, 0x48, 0xb8, 0xd6, 0xc5, 0xfb, 0xca, 0x0f, 0x51, - 0x72, 0x89, 0xed, 0xaa, 0x67, 0xff, 0xaf, 0x7e, 0x2b, 0x28, 0x8b, 0x1b, 0xe7, 0x39, 0xbe, 0x1a, - 0x3e, 0x33, 0x0a, 0xbe, 0xba, 0x28, 0x72, 0xe3, 0xef, 0x23, 0x38, 0xcc, 0xdf, 0x1a, 0x54, 0xc5, - 0x5d, 0x29, 0x66, 0xd0, 0xcb, 0xc4, 0x08, 0x29, 0x46, 0xc4, 0x1f, 0x63, 0x5f, 0xa0, 0xd6, 0xe5, - 0x3b, 0xc2, 0xb7, 0x10, 0x1c, 0x90, 0x49, 0x4d, 0xac, 0xee, 0xda, 0x30, 0xc7, 0xed, 0x37, 0x09, - 0x0a, 0xba, 0xad, 0x8e, 0x46, 0xb7, 0xf7, 0x10, 
0xcc, 0x89, 0x6a, 0x7e, 0xc1, 0x51, 0x41, 0x29, - 0xf7, 0xeb, 0x5d, 0x35, 0x0e, 0x51, 0x0c, 0x36, 0xbe, 0xc8, 0xcd, 0xde, 0xc5, 0xf5, 0x22, 0xb3, - 0x81, 0x6f, 0x47, 0xf5, 0x87, 0xa2, 0x12, 0xfb, 0xa8, 0xee, 0xfa, 0xcd, 0xe8, 0x0d, 0x03, 0x17, - 0x26, 0x44, 0xd6, 0xe7, 0x2c, 0xc2, 0x31, 0x54, 0x18, 0x39, 0x78, 0xe1, 0x04, 0x2f, 0x77, 0x95, - 0x59, 0x7a, 0x6a, 0x2a, 0xba, 0xde, 0x53, 0x88, 0xc9, 0x32, 0xa0, 0xb8, 0xc6, 0xe2, 0x17, 0x0a, - 0xcd, 0x72, 0x43, 0x6f, 0x23, 0x38, 0xac, 0xb2, 0x3d, 0x31, 0x3f, 0x32, 0xd7, 0x8b, 0x50, 0x88, - 0x43, 0x35, 0x5e, 0x1d, 0x89, 0x48, 0x1c, 0xce, 0xa5, 0xab, 0x7f, 0x7c, 0x72, 0x0c, 0xfd, 0xe5, - 0xc9, 0x31, 0xf4, 0x8f, 0x27, 0xc7, 0xd0, 0x1b, 0x17, 0x46, 0xfb, 0x4f, 0xad, 0xe5, 0x3a, 0xd4, - 0x8b, 0x55, 0xf5, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x30, 0xc0, 0x40, 0x7a, 0x39, 0x2c, 0x00, - 0x00, + // 2742 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0x4d, 0x8c, 0x1b, 0x49, + 0x15, 0xa6, 0xec, 0xf1, 0x8c, 0xe7, 0x79, 0x26, 0x93, 0xd4, 0x26, 0x83, 0xd7, 0x99, 0x0d, 0xde, + 0x4e, 0xb2, 0x71, 0x26, 0x19, 0x3b, 0x31, 0x01, 0x65, 0x67, 0x77, 0x05, 0xc9, 0xe4, 0x17, 0x26, + 0xd9, 0xd0, 0x93, 0x10, 0xb4, 0x1c, 0xa0, 0xb6, 0xbb, 0xc6, 0xd3, 0x4c, 0xbb, 0xbb, 0xd3, 0xdd, + 0x76, 0x18, 0x85, 0x5c, 0x16, 0xed, 0x05, 0xad, 0x40, 0xc0, 0x1e, 0x10, 0x42, 0x80, 0x16, 0xad, + 0x84, 0x10, 0x88, 0x0b, 0x42, 0x48, 0x08, 0x09, 0x0e, 0x20, 0x38, 0x20, 0xad, 0xe0, 0xc8, 0x05, + 0x45, 0x88, 0x23, 0x5c, 0xf6, 0x8c, 0x50, 0x55, 0x57, 0x75, 0x57, 0xfb, 0xa7, 0xed, 0xc1, 0x46, + 0x9b, 0x5b, 0xbf, 0x72, 0xd5, 0x7b, 0xdf, 0x7b, 0xf5, 0xea, 0xbd, 0x57, 0xaf, 0x0c, 0x27, 0x02, + 0xea, 0x77, 0xa9, 0xdf, 0x20, 0x9e, 0x67, 0x5b, 0x06, 0x09, 0x2d, 0xd7, 0x51, 0xbf, 0xeb, 0x9e, + 0xef, 0x86, 0x2e, 0x2e, 0x29, 0x43, 0x95, 0x95, 0x96, 0xeb, 0xb6, 0x6c, 0xda, 0x20, 0x9e, 0xd5, + 0x20, 0x8e, 0xe3, 0x86, 0x7c, 0x38, 0x88, 0xa6, 0x56, 0xb4, 0xdd, 0x8b, 0x41, 0xdd, 0x72, 0xf9, + 0xaf, 0x86, 0xeb, 0xd3, 0x46, 0xf7, 0x7c, 0xa3, 0x45, 0x1d, 0xea, 0x93, 0x90, 0x9a, 0x62, 0xce, + 0x85, 0x64, 0x4e, 0x9b, 0x18, 0x3b, 0x96, 0x43, 0xfd, 0xbd, 0x86, 0xb7, 0xdb, 0x62, 0x03, 0x41, + 0xa3, 0x4d, 0x43, 0x32, 0x68, 0xd5, 0x66, 0xcb, 0x0a, 0x77, 0x3a, 0xaf, 0xd7, 0x0d, 0xb7, 0xdd, + 0x20, 0x7e, 0xcb, 0xf5, 0x7c, 0xf7, 0x4b, 0xfc, 0x63, 0xcd, 0x30, 0x1b, 0xdd, 0x66, 0xc2, 0x40, + 0xd5, 0xa5, 0x7b, 0x9e, 0xd8, 0xde, 0x0e, 0xe9, 0xe7, 0x76, 0x75, 0x04, 0x37, 0x9f, 0x7a, 0xae, + 0xb0, 0x0d, 0xff, 0xb4, 0x42, 0xd7, 0xdf, 0x53, 0x3e, 0x23, 0x36, 0xda, 0xfb, 0x08, 0x0e, 0x5e, + 0x4a, 0xe4, 0x7d, 0xa6, 0x43, 0xfd, 0x3d, 0x8c, 0x61, 0xc6, 0x21, 0x6d, 0x5a, 0x46, 0x55, 0x54, + 0x9b, 0xd7, 0xf9, 0x37, 0x2e, 0xc3, 0x9c, 0x4f, 0xb7, 0x7d, 0x1a, 0xec, 0x94, 0x73, 0x7c, 0x58, + 0x92, 0xb8, 0x02, 0x45, 0x26, 0x9c, 0x1a, 0x61, 0x50, 0xce, 0x57, 0xf3, 0xb5, 0x79, 0x3d, 0xa6, + 0x71, 0x0d, 0x96, 0x7c, 0x1a, 0xb8, 0x1d, 0xdf, 0xa0, 0x9f, 0xa5, 0x7e, 0x60, 0xb9, 0x4e, 0x79, + 0x86, 0xaf, 0xee, 0x1d, 0x66, 0x5c, 0x02, 0x6a, 0x53, 0x23, 0x74, 0xfd, 0x72, 0x81, 0x4f, 0x89, + 0x69, 0x86, 0x87, 0x01, 0x2f, 0xcf, 0x46, 0x78, 0xd8, 0x37, 0xd6, 0x60, 0x81, 0x78, 0xde, 0x6d, + 0xd2, 0xa6, 0x81, 0x47, 0x0c, 0x5a, 0x9e, 0xe3, 0xbf, 0xa5, 0xc6, 0x18, 0x66, 0x81, 0xa4, 0x5c, + 0xe4, 0xc0, 0x24, 0xa9, 0x6d, 0xc0, 0xfc, 0x6d, 0xd7, 0xa4, 0xc3, 0xd5, 0xed, 0x65, 0x9f, 0xeb, + 0x67, 0xaf, 0xfd, 0x1e, 0xc1, 0x11, 0x9d, 0x76, 0x2d, 0x86, 0xff, 0x16, 0x0d, 0x89, 0x49, 0x42, + 0xd2, 0xcb, 0x31, 0x17, 0x73, 0xac, 0x40, 0xd1, 0x17, 0x93, 0xcb, 0x39, 0x3e, 0x1e, 0xd3, 0x7d, + 0xd2, 0xf2, 0xd9, 
0xca, 0x44, 0x26, 0x94, 0x24, 0xae, 0x42, 0x29, 0xb2, 0xe5, 0x4d, 0xc7, 0xa4, + 0x5f, 0xe6, 0xd6, 0x2b, 0xe8, 0xea, 0x10, 0x5e, 0x81, 0xf9, 0x6e, 0x64, 0xe7, 0x9b, 0x26, 0xb7, + 0x62, 0x41, 0x4f, 0x06, 0xb4, 0x7f, 0x22, 0x38, 0xa6, 0xf8, 0x80, 0x2e, 0x76, 0xe6, 0x6a, 0x97, + 0x3a, 0x61, 0x30, 0x5c, 0xa1, 0xb3, 0x70, 0x48, 0x6e, 0x62, 0xaf, 0x9d, 0xfa, 0x7f, 0x60, 0x2a, + 0xaa, 0x83, 0x52, 0x45, 0x75, 0x8c, 0x29, 0x22, 0xe9, 0x7b, 0x37, 0xaf, 0x08, 0x35, 0xd5, 0xa1, + 0x3e, 0x43, 0x15, 0xb2, 0x0d, 0x35, 0x9b, 0x32, 0x94, 0xf6, 0x1e, 0x82, 0xb2, 0xa2, 0xe8, 0x2d, + 0xe2, 0x58, 0xdb, 0x34, 0x08, 0xc7, 0xdd, 0x33, 0x34, 0xc5, 0x3d, 0xab, 0xc1, 0x52, 0xa4, 0xd5, + 0x1d, 0x76, 0x1e, 0x59, 0xfc, 0x29, 0x17, 0xaa, 0xf9, 0x5a, 0x5e, 0xef, 0x1d, 0x66, 0x7b, 0x27, + 0x65, 0x06, 0xe5, 0x59, 0xee, 0xc6, 0xc9, 0x80, 0xf6, 0x3c, 0xcc, 0x5f, 0xb3, 0x6c, 0xba, 0xb1, + 0xd3, 0x71, 0x76, 0xf1, 0x61, 0x28, 0x18, 0xec, 0x83, 0xeb, 0xb0, 0xa0, 0x47, 0x84, 0xf6, 0x4d, + 0x04, 0xcf, 0x0f, 0xd3, 0xfa, 0xbe, 0x15, 0xee, 0xb0, 0xf5, 0xc1, 0x30, 0xf5, 0x8d, 0x1d, 0x6a, + 0xec, 0x06, 0x9d, 0xb6, 0x74, 0x59, 0x49, 0x4f, 0xa6, 0xbe, 0xf6, 0x13, 0x04, 0xb5, 0x91, 0x98, + 0xee, 0xfb, 0xc4, 0xf3, 0xa8, 0x8f, 0xaf, 0x41, 0xe1, 0x01, 0xfb, 0x81, 0x1f, 0xd0, 0x52, 0xb3, + 0x5e, 0x57, 0x03, 0xfc, 0x48, 0x2e, 0x37, 0x3e, 0xa4, 0x47, 0xcb, 0x71, 0x5d, 0x9a, 0x27, 0xc7, + 0xf9, 0x2c, 0xa7, 0xf8, 0xc4, 0x56, 0x64, 0xf3, 0xf9, 0xb4, 0xcb, 0xb3, 0x30, 0xe3, 0x11, 0x3f, + 0xd4, 0x8e, 0xc0, 0x33, 0xe9, 0xe3, 0xe1, 0xb9, 0x4e, 0x40, 0xb5, 0x5f, 0xa7, 0xbd, 0x69, 0xc3, + 0xa7, 0x24, 0xa4, 0x3a, 0x7d, 0xd0, 0xa1, 0x41, 0x88, 0x77, 0x41, 0xcd, 0x39, 0xdc, 0xaa, 0xa5, + 0xe6, 0xcd, 0x7a, 0x12, 0xb4, 0xeb, 0x32, 0x68, 0xf3, 0x8f, 0x2f, 0x18, 0x66, 0xbd, 0xdb, 0xac, + 0x7b, 0xbb, 0xad, 0x3a, 0x4b, 0x01, 0x29, 0x64, 0x32, 0x05, 0xa8, 0xaa, 0xea, 0x2a, 0x77, 0xbc, + 0x0c, 0xb3, 0x1d, 0x2f, 0xa0, 0x7e, 0xc8, 0x35, 0x2b, 0xea, 0x82, 0x62, 0xfb, 0xd7, 0x25, 0xb6, + 0x65, 0x92, 0x30, 0xda, 0x9f, 0xa2, 0x1e, 0xd3, 0xda, 0x6f, 0xd2, 0xe8, 0xef, 0x79, 0xe6, 0x07, + 0x85, 0x5e, 0x45, 0x99, 0x4b, 0xa3, 0x54, 0x3d, 0x28, 0x9f, 0xf6, 0xa0, 0x5f, 0xa4, 0xf1, 0x5f, + 0xa1, 0x36, 0x4d, 0xf0, 0x0f, 0x72, 0xe6, 0x32, 0xcc, 0x19, 0x24, 0x30, 0x88, 0x29, 0xa5, 0x48, + 0x92, 0x05, 0x32, 0xcf, 0x77, 0x3d, 0xd2, 0xe2, 0x9c, 0xee, 0xb8, 0xb6, 0x65, 0xec, 0x09, 0x71, + 0xfd, 0x3f, 0xf4, 0x39, 0xfe, 0x4c, 0xb6, 0xe3, 0x17, 0xd2, 0xb0, 0x8f, 0x43, 0x69, 0x6b, 0xcf, + 0x31, 0x5e, 0xf5, 0xa2, 0xc3, 0x7d, 0x18, 0x0a, 0x56, 0x48, 0xdb, 0x41, 0x19, 0xf1, 0x83, 0x1d, + 0x11, 0xda, 0x7f, 0x0a, 0xb0, 0xac, 0xe8, 0xc6, 0x16, 0x64, 0x69, 0x96, 0x15, 0xa5, 0x96, 0x61, + 0xd6, 0xf4, 0xf7, 0xf4, 0x8e, 0x23, 0x1c, 0x40, 0x50, 0x4c, 0xb0, 0xe7, 0x77, 0x9c, 0x08, 0x7e, + 0x51, 0x8f, 0x08, 0xbc, 0x0d, 0xc5, 0x20, 0x64, 0x55, 0x46, 0x6b, 0x8f, 0x03, 0x2f, 0x35, 0x3f, + 0x35, 0xd9, 0xa6, 0x33, 0xe8, 0x5b, 0x82, 0xa3, 0x1e, 0xf3, 0xc6, 0x0f, 0x58, 0x4c, 0x8b, 0x02, + 0x5d, 0x50, 0x9e, 0xab, 0xe6, 0x6b, 0xa5, 0xe6, 0xd6, 0xe4, 0x82, 0x5e, 0xf5, 0x58, 0x85, 0xa4, + 0x64, 0x30, 0x3d, 0x91, 0xc2, 0xc2, 0x68, 0x5b, 0xc4, 0x87, 0x40, 0x54, 0x03, 0xc9, 0x00, 0xfe, + 0x1c, 0x14, 0x2c, 0x67, 0xdb, 0x0d, 0xca, 0xf3, 0x1c, 0xcc, 0xe5, 0xc9, 0xc0, 0xdc, 0x74, 0xb6, + 0x5d, 0x3d, 0x62, 0x88, 0x1f, 0xc0, 0xa2, 0x4f, 0x43, 0x7f, 0x4f, 0x5a, 0xa1, 0x0c, 0xdc, 0xae, + 0x9f, 0x9e, 0x4c, 0x82, 0xae, 0xb2, 0xd4, 0xd3, 0x12, 0xf0, 0x3a, 0x94, 0x82, 0xc4, 0xc7, 0xca, + 0x25, 0x2e, 0xb0, 0x9c, 0x62, 0xa4, 0xf8, 0xa0, 0xae, 0x4e, 0xee, 0xf3, 0xee, 0x85, 0x6c, 0xef, + 0x5e, 0x1c, 0x99, 0xd5, 0x0e, 0x8c, 0x91, 
0xd5, 0x96, 0x7a, 0xb3, 0xda, 0xbf, 0x11, 0xac, 0xf4, + 0x05, 0xa7, 0x2d, 0x8f, 0x66, 0x1e, 0x03, 0x02, 0x33, 0x81, 0x47, 0x0d, 0x9e, 0xa9, 0x4a, 0xcd, + 0x5b, 0x53, 0x8b, 0x56, 0x5c, 0x2e, 0x67, 0x9d, 0x15, 0x50, 0x27, 0x8c, 0x0b, 0x3f, 0x40, 0xf0, + 0x61, 0x45, 0xe6, 0x1d, 0x12, 0x1a, 0x3b, 0x59, 0xca, 0xb2, 0xf3, 0xcb, 0xe6, 0x88, 0xbc, 0x1c, + 0x11, 0xcc, 0xaa, 0xfc, 0xe3, 0xee, 0x9e, 0xc7, 0x00, 0xb2, 0x5f, 0x92, 0x81, 0x09, 0x8b, 0xa7, + 0x9f, 0x22, 0xa8, 0xa8, 0x31, 0xdc, 0xb5, 0xed, 0xd7, 0x89, 0xb1, 0x9b, 0x05, 0xf2, 0x00, 0xe4, + 0x2c, 0x93, 0x23, 0xcc, 0xeb, 0x39, 0xcb, 0xdc, 0x67, 0x30, 0xea, 0x85, 0x3b, 0x9b, 0x0d, 0x77, + 0x2e, 0x0d, 0xf7, 0xfd, 0x1e, 0xb8, 0x32, 0x24, 0x64, 0xc0, 0x5d, 0x81, 0x79, 0xa7, 0xa7, 0x90, + 0x4d, 0x06, 0x06, 0x14, 0xb0, 0xb9, 0xbe, 0x02, 0xb6, 0x0c, 0x73, 0xdd, 0xf8, 0x9a, 0xc3, 0x7e, + 0x96, 0x24, 0x53, 0xb1, 0xe5, 0xbb, 0x1d, 0x4f, 0x18, 0x3d, 0x22, 0x18, 0x8a, 0x5d, 0xcb, 0x61, + 0x25, 0x39, 0x47, 0xc1, 0xbe, 0xf7, 0x7f, 0xb1, 0x49, 0xa9, 0xfd, 0xb3, 0x1c, 0x7c, 0x64, 0x80, + 0xda, 0x23, 0xfd, 0xe9, 0xe9, 0xd0, 0x3d, 0xf6, 0xea, 0xb9, 0xa1, 0x5e, 0x5d, 0x1c, 0xe5, 0xd5, + 0xf3, 0xd9, 0xf6, 0x82, 0xb4, 0xbd, 0x7e, 0x9c, 0x83, 0xea, 0x00, 0x7b, 0x8d, 0x2e, 0x27, 0x9e, + 0x1a, 0x83, 0x6d, 0xbb, 0xbe, 0xf0, 0x92, 0xa2, 0x1e, 0x11, 0xec, 0x9c, 0xb9, 0xbe, 0xb7, 0x43, + 0x1c, 0xee, 0x1d, 0x45, 0x5d, 0x50, 0x13, 0x9a, 0xea, 0x6b, 0x39, 0x28, 0x4b, 0xfb, 0x5c, 0x32, + 0xb8, 0xb5, 0x3a, 0xce, 0xd3, 0x6f, 0xa2, 0x65, 0x98, 0x25, 0x1c, 0xad, 0x70, 0x2a, 0x41, 0xf5, + 0x19, 0xa3, 0x98, 0x6d, 0x8c, 0xf9, 0xb4, 0x31, 0xde, 0x44, 0x70, 0x34, 0x6d, 0x8c, 0x60, 0xd3, + 0x0a, 0x42, 0x79, 0x39, 0xc0, 0xdb, 0x30, 0x17, 0xc9, 0x89, 0x4a, 0xbb, 0x52, 0x73, 0x73, 0xd2, + 0x84, 0x9f, 0x32, 0xbc, 0x64, 0xae, 0xbd, 0x08, 0x47, 0x07, 0x46, 0x39, 0x01, 0xa3, 0x02, 0x45, + 0x59, 0xe4, 0x88, 0xad, 0x89, 0x69, 0xed, 0xcd, 0x99, 0x74, 0xca, 0x71, 0xcd, 0x4d, 0xb7, 0x95, + 0x71, 0xdf, 0xcf, 0xde, 0x4e, 0x66, 0x2a, 0xd7, 0x54, 0xae, 0xf6, 0x92, 0x64, 0xeb, 0x0c, 0xd7, + 0x09, 0x89, 0xe5, 0x50, 0x5f, 0x64, 0xc5, 0x64, 0x80, 0x6d, 0x43, 0x60, 0x39, 0x06, 0xdd, 0xa2, + 0x86, 0xeb, 0x98, 0x01, 0xdf, 0xcf, 0xbc, 0x9e, 0x1a, 0xc3, 0x37, 0x60, 0x9e, 0xd3, 0x77, 0xad, + 0x76, 0x94, 0x06, 0x4a, 0xcd, 0xd5, 0x7a, 0xd4, 0x83, 0xab, 0xab, 0x3d, 0xb8, 0xc4, 0x86, 0x6d, + 0x1a, 0x92, 0x7a, 0xf7, 0x7c, 0x9d, 0xad, 0xd0, 0x93, 0xc5, 0x0c, 0x4b, 0x48, 0x2c, 0x7b, 0xd3, + 0x72, 0x78, 0xe1, 0xc9, 0x44, 0x25, 0x03, 0xcc, 0x55, 0xb6, 0x5d, 0xdb, 0x76, 0x1f, 0xca, 0x73, + 0x13, 0x51, 0x6c, 0x55, 0xc7, 0x09, 0x2d, 0x9b, 0xcb, 0x8f, 0x1c, 0x21, 0x19, 0xe0, 0xab, 0x2c, + 0x3b, 0xa4, 0xbe, 0x38, 0x30, 0x82, 0x8a, 0x9d, 0xb1, 0x14, 0xb5, 0x95, 0xe4, 0x79, 0x8d, 0xdc, + 0x76, 0x41, 0x75, 0xdb, 0xde, 0xa3, 0xb0, 0x38, 0xa0, 0x37, 0xc2, 0xbb, 0x6c, 0xb4, 0x6b, 0xb9, + 0x1d, 0x56, 0x53, 0xf1, 0xd2, 0x43, 0xd2, 0x7d, 0xae, 0xbc, 0x94, 0xed, 0xca, 0x07, 0xd3, 0xae, + 0xfc, 0x5b, 0x04, 0xc5, 0x4d, 0xb7, 0x75, 0xd5, 0x09, 0xfd, 0x3d, 0x7e, 0x4b, 0x72, 0x9d, 0x90, + 0x3a, 0xd2, 0x5f, 0x24, 0xc9, 0x36, 0x21, 0xb4, 0xda, 0x74, 0x2b, 0x24, 0x6d, 0x4f, 0xd4, 0x58, + 0xfb, 0xda, 0x84, 0x78, 0x31, 0x33, 0x8c, 0x4d, 0x82, 0x90, 0x9f, 0xf8, 0xa2, 0xce, 0xbf, 0x99, + 0x0a, 0xf1, 0x84, 0xad, 0xd0, 0x17, 0xc7, 0x3d, 0x35, 0xa6, 0xba, 0x58, 0x21, 0xc2, 0x26, 0x48, + 0xad, 0x0d, 0xcf, 0xc6, 0xc5, 0xff, 0x5d, 0xea, 0xb7, 0x2d, 0x87, 0x64, 0x47, 0xef, 0x31, 0xda, + 0x7b, 0x19, 0x77, 0x4f, 0x37, 0x75, 0xe8, 0x58, 0x2d, 0x7d, 0xdf, 0x72, 0x4c, 0xf7, 0x61, 0xc6, + 0xe1, 0x99, 0x4c, 0xe0, 0x5f, 0xd2, 0x1d, 0x3a, 0x45, 0x62, 0x7c, 
0xd2, 0x6f, 0xc0, 0x22, 0x8b, + 0x09, 0x5d, 0x2a, 0x7e, 0x10, 0x61, 0x47, 0x1b, 0xd6, 0x2c, 0x49, 0x78, 0xe8, 0xe9, 0x85, 0x78, + 0x13, 0x96, 0x48, 0x10, 0x58, 0x2d, 0x87, 0x9a, 0x92, 0x57, 0x6e, 0x6c, 0x5e, 0xbd, 0x4b, 0xa3, + 0x6b, 0x37, 0x9f, 0x21, 0xf6, 0x5b, 0x92, 0xda, 0x57, 0x11, 0x1c, 0x19, 0xc8, 0x24, 0x3e, 0x39, + 0x48, 0x09, 0xe3, 0x15, 0x28, 0x06, 0xc6, 0x0e, 0x35, 0x3b, 0x36, 0x95, 0xbd, 0x28, 0x49, 0xb3, + 0xdf, 0xcc, 0x4e, 0xb4, 0xfb, 0x22, 0x8d, 0xc4, 0x34, 0x3e, 0x06, 0xd0, 0x26, 0x4e, 0x87, 0xd8, + 0x1c, 0xc2, 0x0c, 0x87, 0xa0, 0x8c, 0x68, 0x2b, 0x50, 0x19, 0xe4, 0x3a, 0xa2, 0xc7, 0xf3, 0x2f, + 0x04, 0x07, 0x64, 0x50, 0x15, 0xbb, 0x5b, 0x83, 0x25, 0xc5, 0x0c, 0xb7, 0x93, 0x8d, 0xee, 0x1d, + 0x1e, 0x11, 0x30, 0xa5, 0x97, 0xe4, 0xd3, 0x4d, 0xf6, 0x6e, 0xaa, 0x4d, 0x3e, 0x76, 0xbe, 0x43, + 0x53, 0xaa, 0x1f, 0xbf, 0x02, 0xe5, 0x5b, 0xc4, 0x21, 0x2d, 0x6a, 0xc6, 0x6a, 0xc7, 0x2e, 0xf6, + 0x45, 0xb5, 0x59, 0x31, 0x71, 0x6b, 0x20, 0x2e, 0xb5, 0xac, 0xed, 0x6d, 0xd9, 0xf8, 0xf0, 0xa1, + 0xb8, 0x69, 0x39, 0xbb, 0xec, 0xfe, 0xcc, 0x34, 0x0e, 0xad, 0xd0, 0x96, 0xd6, 0x8d, 0x08, 0x7c, + 0x10, 0xf2, 0x1d, 0xdf, 0x16, 0x1e, 0xc0, 0x3e, 0x71, 0x15, 0x4a, 0x26, 0x0d, 0x0c, 0xdf, 0xf2, + 0xc4, 0xfe, 0xf3, 0xa6, 0xb1, 0x32, 0xc4, 0xf6, 0xc1, 0x32, 0x5c, 0x67, 0xc3, 0x26, 0x41, 0x20, + 0x13, 0x50, 0x3c, 0xa0, 0xbd, 0x0c, 0x8b, 0x4c, 0x66, 0xa2, 0xe6, 0x99, 0xb4, 0x9a, 0x47, 0x52, + 0xf0, 0x25, 0x3c, 0x89, 0x98, 0xc0, 0x33, 0x2c, 0xef, 0x5f, 0xf2, 0x3c, 0xc1, 0x64, 0xcc, 0x72, + 0x28, 0x3f, 0x28, 0x7f, 0x0e, 0xec, 0x95, 0x36, 0xff, 0x76, 0x1c, 0xb0, 0x7a, 0x4e, 0xa8, 0xdf, + 0xb5, 0x0c, 0x8a, 0xbf, 0x85, 0x60, 0x86, 0x89, 0xc6, 0xcf, 0x0d, 0x3b, 0x96, 0xdc, 0x5f, 0x2b, + 0xd3, 0xbb, 0x08, 0x33, 0x69, 0xda, 0xca, 0x1b, 0x7f, 0xfd, 0xc7, 0xb7, 0x73, 0xcb, 0xf8, 0x30, + 0x7f, 0x21, 0xeb, 0x9e, 0x57, 0x5f, 0xab, 0x02, 0xfc, 0x16, 0x02, 0x2c, 0xea, 0x20, 0xe5, 0x0d, + 0x01, 0x9f, 0x19, 0x06, 0x71, 0xc0, 0x5b, 0x43, 0xe5, 0x39, 0x25, 0xab, 0xd4, 0x0d, 0xd7, 0xa7, + 0x2c, 0x87, 0xf0, 0x09, 0x1c, 0xc0, 0x2a, 0x07, 0x70, 0x02, 0x6b, 0x83, 0x00, 0x34, 0x1e, 0x31, + 0x8b, 0x3e, 0x6e, 0xd0, 0x48, 0xee, 0x3b, 0x08, 0x0a, 0xf7, 0xf9, 0x1d, 0x62, 0x84, 0x91, 0xb6, + 0xa6, 0x66, 0x24, 0x2e, 0x8e, 0xa3, 0xd5, 0x8e, 0x73, 0xa4, 0xcf, 0xe1, 0xa3, 0x12, 0x69, 0x10, + 0xfa, 0x94, 0xb4, 0x53, 0x80, 0xcf, 0x21, 0xfc, 0x2e, 0x82, 0xd9, 0xa8, 0x79, 0x8c, 0x4f, 0x0e, + 0x43, 0x99, 0x6a, 0x2e, 0x57, 0xa6, 0xd7, 0x89, 0xd5, 0x4e, 0x73, 0x8c, 0xc7, 0xb5, 0x81, 0xdb, + 0xb9, 0x9e, 0xea, 0xd3, 0xbe, 0x8d, 0x20, 0x7f, 0x9d, 0x8e, 0xf4, 0xb7, 0x29, 0x82, 0xeb, 0x33, + 0xe0, 0x80, 0xad, 0xc6, 0x3f, 0x42, 0xf0, 0xec, 0x75, 0x1a, 0x0e, 0x4e, 0x8f, 0xb8, 0x36, 0x3a, + 0x67, 0x09, 0xb7, 0x3b, 0x33, 0xc6, 0xcc, 0x38, 0x2f, 0x34, 0x38, 0xb2, 0xd3, 0xf8, 0x54, 0x96, + 0x13, 0x06, 0x7b, 0x8e, 0xf1, 0x50, 0xe0, 0xf8, 0x13, 0x82, 0x83, 0xbd, 0x6f, 0x85, 0x38, 0x9d, + 0x50, 0x07, 0x3e, 0x25, 0x56, 0x6e, 0x4f, 0x1a, 0x65, 0xd3, 0x4c, 0xb5, 0x4b, 0x1c, 0xf9, 0x4b, + 0xf8, 0xc5, 0x2c, 0xe4, 0x71, 0x27, 0xae, 0xf1, 0x48, 0x7e, 0x3e, 0xe6, 0xef, 0xda, 0x1c, 0xf6, + 0x9f, 0x11, 0x1c, 0x96, 0x7c, 0x37, 0x76, 0x88, 0x1f, 0x5e, 0xa1, 0xac, 0x86, 0x0e, 0xc6, 0xd2, + 0x67, 0xc2, 0xac, 0xa1, 0xca, 0xd3, 0xae, 0x72, 0x5d, 0x3e, 0x81, 0x5f, 0xd9, 0xb7, 0x2e, 0x06, + 0x63, 0x63, 0x0a, 0xd8, 0x6f, 0x20, 0x58, 0xb8, 0x4e, 0xc3, 0x5b, 0x71, 0x37, 0xf8, 0xe4, 0x58, + 0x2f, 0x4c, 0x95, 0x95, 0xba, 0xf2, 0x9c, 0x2e, 0x7f, 0x8a, 0x5d, 0x64, 0x8d, 0x83, 0x3b, 0x85, + 0x4f, 0x66, 0x81, 0x4b, 0x3a, 0xd0, 0xef, 0x20, 0x38, 0xa2, 0x82, 0x48, 0x5e, 0xe6, 0x3e, 
0xb6, + 0xbf, 0xf7, 0x2e, 0xf1, 0x6a, 0x36, 0x02, 0x5d, 0x93, 0xa3, 0x3b, 0xab, 0x0d, 0x76, 0xe0, 0x76, + 0x1f, 0x8a, 0x75, 0xb4, 0x5a, 0x43, 0xf8, 0x77, 0x08, 0x66, 0xa3, 0x66, 0xec, 0x70, 0x1b, 0xa5, + 0x5e, 0x92, 0xa6, 0x19, 0x0d, 0xc4, 0x6e, 0x57, 0xce, 0x0d, 0x36, 0xa8, 0xba, 0x5e, 0xba, 0x6a, + 0x9d, 0x5b, 0x39, 0x1d, 0xc6, 0x7e, 0x89, 0x00, 0x92, 0x86, 0x32, 0x3e, 0x9d, 0xad, 0x87, 0xd2, + 0x74, 0xae, 0x4c, 0xb7, 0xa5, 0xac, 0xd5, 0xb9, 0x3e, 0xb5, 0x4a, 0x35, 0x33, 0x86, 0x78, 0xd4, + 0x58, 0x8f, 0x9a, 0xcf, 0x3f, 0x44, 0x50, 0xe0, 0x7d, 0x3c, 0x7c, 0x62, 0x18, 0x66, 0xb5, 0xcd, + 0x37, 0x4d, 0xd3, 0xbf, 0xc0, 0xa1, 0x56, 0x9b, 0x59, 0x81, 0x78, 0x1d, 0xad, 0xe2, 0x2e, 0xcc, + 0x46, 0x9d, 0xb3, 0xe1, 0xee, 0x91, 0xea, 0xac, 0x55, 0xaa, 0x19, 0x85, 0x41, 0xe4, 0xa8, 0x22, + 0x07, 0xac, 0x8e, 0xca, 0x01, 0x33, 0x2c, 0x4c, 0xe3, 0xe3, 0x59, 0x41, 0xfc, 0xff, 0x60, 0x98, + 0x33, 0x1c, 0xdd, 0x49, 0xad, 0x3a, 0x2a, 0x0f, 0x30, 0xeb, 0x7c, 0x07, 0xc1, 0xc1, 0xde, 0xe2, + 0x1a, 0x1f, 0xed, 0x89, 0x99, 0xea, 0x5d, 0xa3, 0x92, 0xb6, 0xe2, 0xb0, 0xc2, 0x5c, 0xfb, 0x24, + 0x47, 0xb1, 0x8e, 0x2f, 0x8e, 0x3c, 0x19, 0xb7, 0x65, 0xd4, 0x61, 0x8c, 0xd6, 0x92, 0xd7, 0xb1, + 0x5f, 0x21, 0x58, 0x90, 0x7c, 0xef, 0xfa, 0x94, 0x66, 0xc3, 0x9a, 0xde, 0x41, 0x60, 0xb2, 0xb4, + 0x97, 0x39, 0xfc, 0x8f, 0xe3, 0x0b, 0x63, 0xc2, 0x97, 0xb0, 0xd7, 0x42, 0x86, 0xf4, 0x0f, 0x08, + 0x0e, 0xdd, 0x8f, 0xfc, 0xfe, 0x03, 0xc2, 0xbf, 0xc1, 0xf1, 0xbf, 0x82, 0x5f, 0xca, 0xa8, 0xf3, + 0x46, 0xa9, 0x71, 0x0e, 0xe1, 0x9f, 0x23, 0x28, 0xca, 0x57, 0x15, 0x7c, 0x6a, 0xe8, 0xc1, 0x48, + 0xbf, 0xbb, 0x4c, 0xd3, 0x99, 0x45, 0x51, 0xa3, 0x9d, 0xc8, 0x4c, 0xa7, 0x42, 0x3e, 0x73, 0xe8, + 0xb7, 0x11, 0xe0, 0xf8, 0xce, 0x1c, 0xdf, 0xa2, 0xf1, 0x0b, 0x29, 0x51, 0x43, 0x1b, 0x33, 0x95, + 0x53, 0x23, 0xe7, 0xa5, 0x53, 0xe9, 0x6a, 0x66, 0x2a, 0x75, 0x63, 0xf9, 0x5f, 0x47, 0x50, 0xba, + 0x4e, 0xe3, 0x3b, 0x48, 0x86, 0x2d, 0xd3, 0x8f, 0x42, 0x95, 0xda, 0xe8, 0x89, 0x02, 0xd1, 0x59, + 0x8e, 0xe8, 0x05, 0x9c, 0x6d, 0x2a, 0x09, 0xe0, 0x7b, 0x08, 0x16, 0xef, 0xa8, 0x2e, 0x8a, 0xcf, + 0x8e, 0x92, 0x94, 0x8a, 0xe4, 0xe3, 0xe3, 0xfa, 0x28, 0xc7, 0xb5, 0xa6, 0x8d, 0x85, 0x6b, 0x5d, + 0xbc, 0xaf, 0x7c, 0x1f, 0x45, 0x97, 0xd8, 0x9e, 0x7e, 0xf6, 0xff, 0x6a, 0xb7, 0x8c, 0xb6, 0xb8, + 0x76, 0x81, 0xe3, 0xab, 0xe3, 0xb3, 0xe3, 0xe0, 0x6b, 0x88, 0x26, 0x37, 0xfe, 0x2e, 0x82, 0x43, + 0xfc, 0xad, 0x41, 0x65, 0xdc, 0x93, 0x62, 0x86, 0xbd, 0x4c, 0x8c, 0x91, 0x62, 0x44, 0xfc, 0xd1, + 0xf6, 0x05, 0x6a, 0x5d, 0xbe, 0x23, 0x7c, 0x03, 0xc1, 0x01, 0x99, 0xd4, 0xc4, 0xee, 0xae, 0x8d, + 0x32, 0xdc, 0x7e, 0x93, 0xa0, 0x70, 0xb7, 0xd5, 0xf1, 0xdc, 0xed, 0x5d, 0x04, 0x73, 0xa2, 0x9b, + 0x9f, 0x51, 0x2a, 0x28, 0xed, 0xfe, 0x4a, 0x4f, 0x8f, 0x43, 0x34, 0x83, 0xb5, 0xcf, 0x73, 0xb1, + 0xf7, 0x70, 0x23, 0x4b, 0xac, 0xe7, 0x9a, 0x41, 0xe3, 0x91, 0xe8, 0xc4, 0x3e, 0x6e, 0xd8, 0x6e, + 0x2b, 0x78, 0x4d, 0xc3, 0x99, 0x09, 0x91, 0xcd, 0x39, 0x87, 0x70, 0x08, 0xf3, 0xcc, 0x39, 0x78, + 0xe3, 0x04, 0x57, 0x7b, 0xda, 0x2c, 0x7d, 0x3d, 0x95, 0x4a, 0xa5, 0xaf, 0x11, 0x93, 0x64, 0x40, + 0x71, 0x8d, 0xc5, 0xcf, 0x67, 0x8a, 0xe5, 0x82, 0xde, 0x42, 0x70, 0x48, 0xf5, 0xf6, 0x48, 0xfc, + 0xd8, 0xbe, 0x9e, 0x85, 0x42, 0x14, 0xd5, 0x78, 0x75, 0x2c, 0x47, 0xe2, 0x70, 0x2e, 0x5f, 0xfb, + 0xe3, 0x93, 0x63, 0xe8, 0xbd, 0x27, 0xc7, 0xd0, 0xdf, 0x9f, 0x1c, 0x43, 0xaf, 0x5d, 0x1c, 0xef, + 0x3f, 0xc2, 0x86, 0x6d, 0x51, 0x27, 0x54, 0xd9, 0xff, 0x37, 0x00, 0x00, 0xff, 0xff, 0x45, 0x63, + 0x3b, 0x00, 0x09, 0x2d, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -4339,6 +4393,16 @@ func (m *RevisionMetadataQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.VersionId != nil { + i = encodeVarintApplication(dAtA, i, uint64(*m.VersionId)) + i-- + dAtA[i] = 0x30 + } + if m.SourceIndex != nil { + i = encodeVarintApplication(dAtA, i, uint64(*m.SourceIndex)) + i-- + dAtA[i] = 0x28 + } if m.Project != nil { i -= len(*m.Project) copy(dAtA[i:], *m.Project) @@ -4469,6 +4533,22 @@ func (m *ApplicationManifestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Revisions) > 0 { + for iNdEx := len(m.Revisions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Revisions[iNdEx]) + copy(dAtA[i:], m.Revisions[iNdEx]) + i = encodeVarintApplication(dAtA, i, uint64(len(m.Revisions[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.SourcePositions) > 0 { + for iNdEx := len(m.SourcePositions) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintApplication(dAtA, i, uint64(m.SourcePositions[iNdEx])) + i-- + dAtA[i] = 0x28 + } + } if m.Project != nil { i -= len(*m.Project) copy(dAtA[i:], *m.Project) @@ -4948,6 +5028,22 @@ func (m *ApplicationSyncRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Revisions) > 0 { + for iNdEx := len(m.Revisions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Revisions[iNdEx]) + copy(dAtA[i:], m.Revisions[iNdEx]) + i = encodeVarintApplication(dAtA, i, uint64(len(m.Revisions[iNdEx]))) + i-- + dAtA[i] = 0x7a + } + } + if len(m.SourcePositions) > 0 { + for iNdEx := len(m.SourcePositions) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintApplication(dAtA, i, uint64(m.SourcePositions[iNdEx])) + i-- + dAtA[i] = 0x70 + } + } if m.Project != nil { i -= len(*m.Project) copy(dAtA[i:], *m.Project) @@ -6648,6 +6744,12 @@ func (m *RevisionMetadataQuery) Size() (n int) { l = len(*m.Project) n += 1 + l + sovApplication(uint64(l)) } + if m.SourceIndex != nil { + n += 1 + sovApplication(uint64(*m.SourceIndex)) + } + if m.VersionId != nil { + n += 1 + sovApplication(uint64(*m.VersionId)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6712,6 +6814,17 @@ func (m *ApplicationManifestQuery) Size() (n int) { l = len(*m.Project) n += 1 + l + sovApplication(uint64(l)) } + if len(m.SourcePositions) > 0 { + for _, e := range m.SourcePositions { + n += 1 + sovApplication(uint64(e)) + } + } + if len(m.Revisions) > 0 { + for _, s := range m.Revisions { + l = len(s) + n += 1 + l + sovApplication(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6965,6 +7078,17 @@ func (m *ApplicationSyncRequest) Size() (n int) { l = len(*m.Project) n += 1 + l + sovApplication(uint64(l)) } + if len(m.SourcePositions) > 0 { + for _, e := range m.SourcePositions { + n += 1 + sovApplication(uint64(e)) + } + } + if len(m.Revisions) > 0 { + for _, s := range m.Revisions { + l = len(s) + n += 1 + l + sovApplication(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -8244,6 +8368,46 @@ func (m *RevisionMetadataQuery) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.Project = &s iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceIndex", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SourceIndex = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionId", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.VersionId = &v default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -8689,6 +8853,114 @@ func (m *ApplicationManifestQuery) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.Project = &s iNdEx = postIndex + case 5: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SourcePositions = append(m.SourcePositions, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.SourcePositions) == 0 { + m.SourcePositions = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SourcePositions = append(m.SourcePositions, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePositions", wireType) + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revisions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revisions = append(m.Revisions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -10153,6 +10425,114 @@ func (m *ApplicationSyncRequest) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.Project = &s iNdEx = postIndex + case 14: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SourcePositions = append(m.SourcePositions, v) + } 
else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.SourcePositions) == 0 { + m.SourcePositions = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SourcePositions = append(m.SourcePositions, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePositions", wireType) + } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revisions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revisions = append(m.Revisions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/repository/repository.pb.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/repository/repository.pb.go index 5540580c21..8dbb20ce7b 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/repository/repository.pb.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/repository/repository.pb.go @@ -163,12 +163,16 @@ func (m *AppInfo) GetPath() string { // RepoAppDetailsQuery contains query information for app details request type RepoAppDetailsQuery struct { - Source *v1alpha1.ApplicationSource `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` - AppName string `protobuf:"bytes,2,opt,name=appName,proto3" json:"appName,omitempty"` - AppProject string `protobuf:"bytes,3,opt,name=appProject,proto3" json:"appProject,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Source *v1alpha1.ApplicationSource `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + AppName string `protobuf:"bytes,2,opt,name=appName,proto3" json:"appName,omitempty"` + AppProject string `protobuf:"bytes,3,opt,name=appProject,proto3" json:"appProject,omitempty"` + // source index (for multi source apps) + SourceIndex int32 `protobuf:"varint,4,opt,name=sourceIndex,proto3" json:"sourceIndex,omitempty"` + // versionId from historical data (for multi source apps) + VersionId int32 
`protobuf:"varint,5,opt,name=versionId,proto3" json:"versionId,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *RepoAppDetailsQuery) Reset() { *m = RepoAppDetailsQuery{} } @@ -225,6 +229,20 @@ func (m *RepoAppDetailsQuery) GetAppProject() string { return "" } +func (m *RepoAppDetailsQuery) GetSourceIndex() int32 { + if m != nil { + return m.SourceIndex + } + return 0 +} + +func (m *RepoAppDetailsQuery) GetVersionId() int32 { + if m != nil { + return m.VersionId + } + return 0 +} + // RepoAppsResponse contains applications of specified repository type RepoAppsResponse struct { Items []*AppInfo `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` @@ -278,7 +296,9 @@ type RepoQuery struct { // Repo URL for query Repo string `protobuf:"bytes,1,opt,name=repo,proto3" json:"repo,omitempty"` // Whether to force a cache refresh on repo's connection state - ForceRefresh bool `protobuf:"varint,2,opt,name=forceRefresh,proto3" json:"forceRefresh,omitempty"` + ForceRefresh bool `protobuf:"varint,2,opt,name=forceRefresh,proto3" json:"forceRefresh,omitempty"` + // App project for query + AppProject string `protobuf:"bytes,3,opt,name=appProject,proto3" json:"appProject,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -331,6 +351,13 @@ func (m *RepoQuery) GetForceRefresh() bool { return false } +func (m *RepoQuery) GetAppProject() string { + if m != nil { + return m.AppProject + } + return "" +} + // RepoAccessQuery is a query for checking access to a repo type RepoAccessQuery struct { // The URL to the repo @@ -703,79 +730,81 @@ func init() { } var fileDescriptor_8d38260443475705 = []byte{ - // 1146 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x5f, 0x6f, 0x1b, 0x45, - 0x10, 0xd7, 0x25, 0x8d, 0x9b, 0x4c, 0x9a, 0xd4, 0xd9, 0x84, 0x72, 0xb8, 0x69, 0x1a, 0x5d, 0x4b, - 0x15, 0xa2, 0x72, 0xd7, 0x18, 0x21, 0x50, 0x11, 0x48, 0xce, 0x1f, 0x35, 0x11, 0x11, 0x29, 0x57, - 0x85, 0x07, 0x04, 0x42, 0x9b, 0xf3, 0xc4, 0xbe, 0xf6, 0x7c, 0xb7, 0xdd, 0x5d, 0x1b, 0xac, 0xaa, - 0x2f, 0x3c, 0x21, 0xc1, 0x0b, 0x42, 0x48, 0xbc, 0x21, 0x24, 0x24, 0x1e, 0xf8, 0x02, 0x7c, 0x04, - 0x1e, 0x91, 0xf8, 0x02, 0x28, 0xe2, 0x73, 0x20, 0xb4, 0xbb, 0xe7, 0xbb, 0x73, 0x62, 0x3b, 0xa9, - 0x08, 0x79, 0xdb, 0xf9, 0xcd, 0xdc, 0xcc, 0x6f, 0x7f, 0x3b, 0x3b, 0x6b, 0x83, 0x23, 0x90, 0x77, - 0x90, 0x7b, 0x1c, 0x59, 0x22, 0x42, 0x99, 0xf0, 0x6e, 0x61, 0xe9, 0x32, 0x9e, 0xc8, 0x84, 0x40, - 0x8e, 0x54, 0x16, 0x1b, 0x49, 0xd2, 0x88, 0xd0, 0xa3, 0x2c, 0xf4, 0x68, 0x1c, 0x27, 0x92, 0xca, - 0x30, 0x89, 0x85, 0x89, 0xac, 0xec, 0x36, 0x42, 0xd9, 0x6c, 0x1f, 0xb8, 0x41, 0xd2, 0xf2, 0x28, - 0x6f, 0x24, 0x8c, 0x27, 0x8f, 0xf5, 0xe2, 0xf5, 0xa0, 0xee, 0x75, 0xaa, 0x1e, 0x7b, 0xd2, 0x50, - 0x5f, 0x0a, 0x8f, 0x32, 0x16, 0x85, 0x81, 0xfe, 0xd6, 0xeb, 0xac, 0xd1, 0x88, 0x35, 0xe9, 0x9a, - 0xd7, 0xc0, 0x18, 0x39, 0x95, 0x58, 0x4f, 0xb3, 0x6d, 0x9d, 0x92, 0x4d, 0xd3, 0x3a, 0x95, 0xbe, - 0xd3, 0x85, 0x19, 0x1f, 0x59, 0x52, 0x63, 0x4c, 0x7c, 0xd8, 0x46, 0xde, 0x25, 0x04, 0x2e, 0xa9, - 0x20, 0xdb, 0x5a, 0xb6, 0x56, 0xa6, 0x7c, 0xbd, 0x26, 0x15, 0x98, 0xe4, 0xd8, 0x09, 0x45, 0x98, - 0xc4, 0xf6, 0x98, 0xc6, 0x33, 0x9b, 0xd8, 0x70, 0x99, 0x32, 0xf6, 0x01, 0x6d, 0xa1, 0x3d, 0xae, - 0x5d, 0x3d, 0x93, 0x2c, 0x01, 0x50, 0xc6, 0x1e, 0xf2, 0xe4, 0x31, 0x06, 0xd2, 0xbe, 0xa4, 0x9d, - 0x05, 0xc4, 0x59, 0x83, 0xcb, 0x35, 0xc6, 0x76, 0xe2, 0xc3, 
0x44, 0x15, 0x95, 0x5d, 0x86, 0xbd, - 0xa2, 0x6a, 0xad, 0x30, 0x46, 0x65, 0x33, 0x2d, 0xa8, 0xd7, 0xce, 0x6f, 0x16, 0xcc, 0xa7, 0x74, - 0x37, 0x51, 0xd2, 0x30, 0x4a, 0x49, 0x37, 0xa0, 0x24, 0x92, 0x36, 0x0f, 0x4c, 0x86, 0xe9, 0xea, - 0x9e, 0x9b, 0xab, 0xe3, 0xf6, 0xd4, 0xd1, 0x8b, 0xcf, 0x82, 0xba, 0xdb, 0xa9, 0xba, 0xec, 0x49, - 0xc3, 0x55, 0x5a, 0xbb, 0x05, 0xad, 0xdd, 0x9e, 0xd6, 0x6e, 0x2d, 0x07, 0x1f, 0xe9, 0xb4, 0x7e, - 0x9a, 0xbe, 0xb8, 0xdb, 0xb1, 0x51, 0xbb, 0x1d, 0x3f, 0xb1, 0xdb, 0x77, 0xa1, 0xdc, 0x13, 0xda, - 0x47, 0xc1, 0x92, 0x58, 0x20, 0x79, 0x0d, 0x26, 0x42, 0x89, 0x2d, 0x61, 0x5b, 0xcb, 0xe3, 0x2b, - 0xd3, 0xd5, 0x79, 0xb7, 0x70, 0x3c, 0xa9, 0x34, 0xbe, 0x89, 0x70, 0x36, 0x60, 0x4a, 0x7d, 0x3e, - 0xfc, 0x8c, 0x1c, 0xb8, 0x72, 0x98, 0x28, 0xaa, 0x78, 0xc8, 0x51, 0x18, 0xd9, 0x26, 0xfd, 0x3e, - 0xcc, 0xf9, 0x69, 0x02, 0xae, 0x6a, 0x12, 0x41, 0x80, 0x62, 0xf4, 0x79, 0xb7, 0x05, 0xf2, 0x38, - 0xdf, 0x66, 0x66, 0x2b, 0x1f, 0xa3, 0x42, 0x7c, 0x9e, 0xf0, 0x7a, 0xba, 0xcb, 0xcc, 0x26, 0xb7, - 0x61, 0x46, 0x88, 0xe6, 0x43, 0x1e, 0x76, 0xa8, 0xc4, 0xf7, 0xb1, 0x9b, 0x1e, 0x7a, 0x3f, 0xa8, - 0x32, 0x84, 0xb1, 0xc0, 0xa0, 0xcd, 0xd1, 0x9e, 0xd0, 0x2c, 0x33, 0x9b, 0xdc, 0x85, 0x39, 0x19, - 0x89, 0x8d, 0x28, 0xc4, 0x58, 0x6e, 0x20, 0x97, 0x9b, 0x54, 0x52, 0xbb, 0xa4, 0xb3, 0x9c, 0x74, - 0x90, 0x55, 0x28, 0xf7, 0x81, 0xaa, 0xe4, 0x65, 0x1d, 0x7c, 0x02, 0xcf, 0x5a, 0x6c, 0xaa, 0xbf, - 0xc5, 0xf4, 0x1e, 0xc1, 0x60, 0x7a, 0x7f, 0x8b, 0x30, 0x85, 0x31, 0x3d, 0x88, 0x70, 0x2f, 0x08, - 0xed, 0x69, 0x4d, 0x2f, 0x07, 0xc8, 0x3d, 0x98, 0x37, 0x9d, 0x55, 0x53, 0x27, 0x9b, 0xed, 0xf3, - 0x8a, 0x4e, 0x30, 0xc8, 0x45, 0x96, 0x61, 0x3a, 0x83, 0x77, 0x36, 0xed, 0x99, 0x65, 0x6b, 0x65, - 0xdc, 0x2f, 0x42, 0xe4, 0x6d, 0x78, 0x39, 0x37, 0x63, 0x21, 0x69, 0x14, 0xe9, 0xd6, 0xdb, 0xd9, - 0xb4, 0x67, 0x75, 0xf4, 0x30, 0x37, 0x79, 0x0f, 0x2a, 0x99, 0x6b, 0x2b, 0x96, 0xc8, 0x19, 0x0f, - 0x05, 0xae, 0x53, 0x81, 0xfb, 0x3c, 0xb2, 0xaf, 0x6a, 0x52, 0x23, 0x22, 0xc8, 0x02, 0x4c, 0x30, - 0x9e, 0x7c, 0xd1, 0xb5, 0xcb, 0x3a, 0xd4, 0x18, 0xaa, 0xc7, 0x59, 0xda, 0xc6, 0x73, 0xa6, 0xc7, - 0x53, 0x93, 0x54, 0x61, 0xa1, 0x11, 0xb0, 0x47, 0xc8, 0x3b, 0x61, 0x80, 0xb5, 0x20, 0x48, 0xda, - 0xb1, 0xd6, 0x9c, 0xe8, 0xb0, 0x81, 0x3e, 0xe2, 0x02, 0xd1, 0x3d, 0xb8, 0x2d, 0x25, 0x5b, 0xa7, - 0x22, 0x0c, 0x6a, 0x6d, 0xd9, 0xb4, 0xe7, 0xb5, 0xb0, 0x03, 0x3c, 0xce, 0x2c, 0x5c, 0x51, 0x2d, - 0xda, 0xbb, 0x23, 0xce, 0x2f, 0x16, 0xcc, 0x29, 0x60, 0x83, 0x23, 0x95, 0xe8, 0xe3, 0xd3, 0x36, - 0x0a, 0x49, 0x3e, 0x29, 0x74, 0xed, 0x74, 0x75, 0xfb, 0xbf, 0x5d, 0x77, 0x3f, 0xbb, 0x75, 0x69, - 0xff, 0x5f, 0x83, 0x52, 0x9b, 0x09, 0xe4, 0x32, 0xbd, 0x45, 0xa9, 0xa5, 0x7a, 0x23, 0xe0, 0x58, - 0x17, 0x7b, 0x71, 0xd4, 0xd5, 0xcd, 0x3f, 0xe9, 0xe7, 0x80, 0xf3, 0xd4, 0x10, 0xdd, 0x67, 0xf5, - 0x8b, 0x22, 0x5a, 0xfd, 0x67, 0xd6, 0xd4, 0x34, 0x60, 0x2a, 0x3e, 0xf9, 0xc6, 0x82, 0x4b, 0xbb, - 0xa1, 0x90, 0xe4, 0xa5, 0xe2, 0x40, 0xc9, 0xc6, 0x47, 0x65, 0xf7, 0xbc, 0x58, 0xa8, 0x22, 0xce, - 0xcd, 0x2f, 0xff, 0xfc, 0xfb, 0xbb, 0xb1, 0x6b, 0x64, 0x41, 0x3f, 0x7b, 0x9d, 0xb5, 0xfc, 0x8d, - 0x09, 0x51, 0x7c, 0x35, 0x66, 0x91, 0xaf, 0x2d, 0x18, 0x7f, 0x80, 0x43, 0xd9, 0x9c, 0x9b, 0x26, - 0xce, 0x2d, 0xcd, 0xe4, 0x06, 0xb9, 0x3e, 0x88, 0x89, 0xf7, 0x4c, 0x59, 0xcf, 0xc9, 0xf7, 0x16, - 0x94, 0x15, 0x6f, 0xbf, 0xe0, 0xbb, 0x18, 0xa1, 0x16, 0x47, 0x09, 0x45, 0x3e, 0x85, 0x49, 0x43, - 0xeb, 0x70, 0x28, 0x9d, 0x72, 0x3f, 0x7c, 0x28, 0x9c, 0x15, 0x9d, 0xd2, 0x21, 0xcb, 0x23, 0x76, - 0xec, 0x71, 0x95, 0xb2, 0x65, 0xd2, 0xab, 0xe7, 0x87, 0xbc, 0x72, 0x3c, 0x7d, 0xf6, 
0xfa, 0x57, - 0x16, 0x07, 0xb9, 0xb2, 0xbb, 0x78, 0xa6, 0x72, 0x54, 0x95, 0xf8, 0xd6, 0x82, 0x99, 0x07, 0x28, - 0xf3, 0x77, 0x9a, 0xdc, 0x1c, 0x90, 0xb9, 0xf8, 0x86, 0x57, 0x9c, 0xe1, 0x01, 0x19, 0x81, 0x77, - 0x34, 0x81, 0x37, 0x9d, 0x7b, 0x83, 0x09, 0x98, 0x47, 0x5a, 0xe7, 0xd9, 0xf7, 0x77, 0x35, 0x95, - 0xba, 0xc9, 0x70, 0xdf, 0x5a, 0x25, 0x1d, 0x4d, 0x69, 0x1b, 0xa3, 0xd6, 0x46, 0x93, 0x72, 0x39, - 0x54, 0xe6, 0xa5, 0x22, 0x9c, 0x87, 0x67, 0x24, 0x5c, 0x4d, 0x62, 0x85, 0xdc, 0x19, 0xa5, 0x42, - 0x13, 0xa3, 0x56, 0x60, 0xca, 0xfc, 0x60, 0x41, 0xc9, 0x4c, 0x2f, 0x72, 0xe3, 0x78, 0xc5, 0xbe, - 0xa9, 0x76, 0x8e, 0x57, 0xe1, 0x55, 0xcd, 0x71, 0xd1, 0x19, 0xd8, 0x6b, 0xf7, 0xf5, 0xf0, 0x50, - 0x57, 0xf3, 0x47, 0x0b, 0xca, 0x3d, 0x0a, 0xbd, 0x6f, 0x2f, 0x8e, 0xa4, 0x73, 0x3a, 0x49, 0xf2, - 0xb3, 0x05, 0x25, 0x33, 0x51, 0x4f, 0xf2, 0xea, 0x9b, 0xb4, 0xe7, 0xc8, 0x6b, 0xcd, 0x1c, 0x70, - 0x65, 0x44, 0x9b, 0x6b, 0x2a, 0xcf, 0x73, 0x21, 0x7f, 0xb5, 0xa0, 0xdc, 0xa3, 0x33, 0x5c, 0xc8, - 0xff, 0x8b, 0xb0, 0xfb, 0x62, 0x84, 0x09, 0x85, 0xd2, 0x26, 0x46, 0x28, 0x71, 0xd8, 0x15, 0xb0, - 0x8f, 0xc3, 0x59, 0xf3, 0xdf, 0x31, 0x33, 0x76, 0x75, 0xd4, 0x8c, 0x55, 0x82, 0x34, 0xa1, 0x6c, - 0x4a, 0x14, 0xf4, 0x78, 0xe1, 0x62, 0xb7, 0xce, 0x50, 0x8c, 0x3c, 0x83, 0xd9, 0x8f, 0x68, 0x14, - 0x2a, 0x65, 0xcd, 0xef, 0x5a, 0x72, 0xfd, 0xc4, 0x24, 0xc9, 0x7f, 0xef, 0x8e, 0xa8, 0x56, 0xd5, - 0xd5, 0xee, 0x3a, 0xb7, 0x47, 0xdd, 0xeb, 0x4e, 0x5a, 0xca, 0x28, 0xb9, 0xbe, 0xf5, 0xfb, 0xd1, - 0x92, 0xf5, 0xc7, 0xd1, 0x92, 0xf5, 0xd7, 0xd1, 0x92, 0xf5, 0xf1, 0x5b, 0x67, 0xfb, 0x87, 0x17, - 0xe8, 0x1f, 0xa6, 0x85, 0xff, 0x62, 0x07, 0x25, 0xfd, 0x67, 0xec, 0x8d, 0x7f, 0x03, 0x00, 0x00, - 0xff, 0xff, 0x52, 0xa9, 0xe9, 0x17, 0x71, 0x0e, 0x00, 0x00, + // 1178 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x5d, 0x6f, 0x1b, 0x45, + 0x17, 0xd6, 0x26, 0x8d, 0x93, 0x9c, 0x7c, 0xd4, 0x99, 0xe4, 0xed, 0xbb, 0xb8, 0x6e, 0x1a, 0x6d, + 0x4b, 0x15, 0xa2, 0xb2, 0x6e, 0x8c, 0x10, 0xa8, 0x08, 0x24, 0xe7, 0x43, 0x4d, 0x44, 0x44, 0xca, + 0x56, 0xe1, 0x02, 0x81, 0xd0, 0x64, 0x7d, 0x62, 0x6f, 0xbb, 0xde, 0x9d, 0xce, 0x8c, 0x4d, 0xad, + 0xaa, 0x37, 0x5c, 0x21, 0xc1, 0x0d, 0x42, 0x48, 0xdc, 0x21, 0x24, 0x24, 0x2e, 0xf8, 0x23, 0x5c, + 0x22, 0xf1, 0x07, 0x50, 0xc4, 0x8f, 0xe0, 0x0a, 0xa1, 0x99, 0x59, 0xef, 0xae, 0x13, 0xdb, 0x49, + 0x45, 0xc8, 0xdd, 0xcc, 0x73, 0xce, 0x9e, 0xf3, 0xcc, 0xb3, 0xe7, 0x9c, 0x9d, 0x05, 0x47, 0x20, + 0xef, 0x20, 0xaf, 0x70, 0x64, 0xb1, 0x08, 0x64, 0xcc, 0xbb, 0xb9, 0xa5, 0xcb, 0x78, 0x2c, 0x63, + 0x02, 0x19, 0x52, 0x2a, 0x37, 0xe2, 0xb8, 0x11, 0x62, 0x85, 0xb2, 0xa0, 0x42, 0xa3, 0x28, 0x96, + 0x54, 0x06, 0x71, 0x24, 0x8c, 0x67, 0x69, 0xaf, 0x11, 0xc8, 0x66, 0xfb, 0xd0, 0xf5, 0xe3, 0x56, + 0x85, 0xf2, 0x46, 0xcc, 0x78, 0xfc, 0x58, 0x2f, 0x5e, 0xf7, 0xeb, 0x95, 0x4e, 0xb5, 0xc2, 0x9e, + 0x34, 0xd4, 0x93, 0xa2, 0x42, 0x19, 0x0b, 0x03, 0x5f, 0x3f, 0x5b, 0xe9, 0xac, 0xd3, 0x90, 0x35, + 0xe9, 0x7a, 0xa5, 0x81, 0x11, 0x72, 0x2a, 0xb1, 0x9e, 0x44, 0xdb, 0x3e, 0x23, 0x9a, 0xa6, 0x75, + 0x26, 0x7d, 0xa7, 0x0b, 0x73, 0x1e, 0xb2, 0xb8, 0xc6, 0x98, 0xf8, 0xb0, 0x8d, 0xbc, 0x4b, 0x08, + 0x5c, 0x51, 0x4e, 0xb6, 0xb5, 0x62, 0xad, 0x4e, 0x7b, 0x7a, 0x4d, 0x4a, 0x30, 0xc5, 0xb1, 0x13, + 0x88, 0x20, 0x8e, 0xec, 0x31, 0x8d, 0xa7, 0x7b, 0x62, 0xc3, 0x24, 0x65, 0xec, 0x03, 0xda, 0x42, + 0x7b, 0x5c, 0x9b, 0x7a, 0x5b, 0xb2, 0x0c, 0x40, 0x19, 0x7b, 0xc8, 0xe3, 0xc7, 0xe8, 0x4b, 0xfb, + 0x8a, 0x36, 0xe6, 0x10, 0x67, 0x1d, 0x26, 0x6b, 0x8c, 0xed, 0x46, 0x47, 0xb1, 0x4a, 0x2a, 0xbb, + 
0x0c, 0x7b, 0x49, 0xd5, 0x5a, 0x61, 0x8c, 0xca, 0x66, 0x92, 0x50, 0xaf, 0x9d, 0xbf, 0x2c, 0x58, + 0x4c, 0xe8, 0x6e, 0xa1, 0xa4, 0x41, 0x98, 0x90, 0x6e, 0x40, 0x41, 0xc4, 0x6d, 0xee, 0x9b, 0x08, + 0x33, 0xd5, 0x7d, 0x37, 0x53, 0xc7, 0xed, 0xa9, 0xa3, 0x17, 0x9f, 0xf9, 0x75, 0xb7, 0x53, 0x75, + 0xd9, 0x93, 0x86, 0xab, 0xb4, 0x76, 0x73, 0x5a, 0xbb, 0x3d, 0xad, 0xdd, 0x5a, 0x06, 0x3e, 0xd2, + 0x61, 0xbd, 0x24, 0x7c, 0xfe, 0xb4, 0x63, 0xa3, 0x4e, 0x3b, 0x7e, 0xf2, 0xb4, 0x64, 0x05, 0x66, + 0x4c, 0x8c, 0xdd, 0xa8, 0x8e, 0xcf, 0xb4, 0x1c, 0x13, 0x5e, 0x1e, 0x22, 0x65, 0x98, 0xee, 0x20, + 0x57, 0xa2, 0xee, 0xd6, 0xed, 0x09, 0x6d, 0xcf, 0x00, 0xe7, 0x5d, 0x28, 0xf6, 0x5e, 0x94, 0x87, + 0x82, 0xc5, 0x91, 0x40, 0xf2, 0x1a, 0x4c, 0x04, 0x12, 0x5b, 0xc2, 0xb6, 0x56, 0xc6, 0x57, 0x67, + 0xaa, 0x8b, 0x6e, 0xee, 0xf5, 0x26, 0xd2, 0x7a, 0xc6, 0xc3, 0xf1, 0x61, 0x5a, 0x3d, 0x3e, 0xfc, + 0x1d, 0x3b, 0x30, 0x7b, 0x14, 0xab, 0xa3, 0xe2, 0x11, 0x47, 0x61, 0x64, 0x9f, 0xf2, 0xfa, 0xb0, + 0xb3, 0xce, 0xe8, 0xfc, 0x38, 0x01, 0x57, 0x35, 0x49, 0xdf, 0x47, 0x31, 0xba, 0x9e, 0xda, 0x02, + 0x79, 0x94, 0xc9, 0x98, 0xee, 0x95, 0x8d, 0x51, 0x21, 0x3e, 0x8f, 0x79, 0x3d, 0xc9, 0x90, 0xee, + 0xc9, 0x6d, 0x98, 0x13, 0xa2, 0xf9, 0x90, 0x07, 0x1d, 0x2a, 0xf1, 0x7d, 0xec, 0x26, 0x45, 0xd5, + 0x0f, 0xaa, 0x08, 0x41, 0x24, 0xd0, 0x6f, 0x73, 0xd4, 0x32, 0x4e, 0x79, 0xe9, 0x9e, 0xdc, 0x85, + 0x05, 0x19, 0x8a, 0xcd, 0x30, 0xc0, 0x48, 0x6e, 0x22, 0x97, 0x5b, 0x54, 0x52, 0xbb, 0xa0, 0xa3, + 0x9c, 0x36, 0x90, 0x35, 0x28, 0xf6, 0x81, 0x2a, 0xe5, 0xa4, 0x76, 0x3e, 0x85, 0xa7, 0x25, 0x3c, + 0xdd, 0x5f, 0xc2, 0xfa, 0x8c, 0x60, 0x30, 0x7d, 0xbe, 0x32, 0x4c, 0x63, 0x44, 0x0f, 0x43, 0xdc, + 0xf7, 0x03, 0x7b, 0x46, 0xd3, 0xcb, 0x00, 0x72, 0x0f, 0x16, 0x4d, 0xe5, 0xd6, 0x94, 0xaa, 0xe9, + 0x39, 0x67, 0x75, 0x80, 0x41, 0x26, 0x55, 0x57, 0x29, 0xbc, 0xbb, 0x65, 0xcf, 0xad, 0x58, 0xab, + 0xe3, 0x5e, 0x1e, 0x22, 0x6f, 0xc3, 0xff, 0xb3, 0x6d, 0x24, 0x24, 0x0d, 0x43, 0x5d, 0xda, 0xbb, + 0x5b, 0xf6, 0xbc, 0xf6, 0x1e, 0x66, 0x26, 0xef, 0x41, 0x29, 0x35, 0x6d, 0x47, 0x12, 0x39, 0xe3, + 0x81, 0xc0, 0x0d, 0x2a, 0xf0, 0x80, 0x87, 0xf6, 0x55, 0x4d, 0x6a, 0x84, 0x07, 0x59, 0x82, 0x09, + 0xc6, 0xe3, 0x67, 0x5d, 0xbb, 0xa8, 0x5d, 0xcd, 0x46, 0xf5, 0x10, 0x4b, 0x4a, 0x68, 0xc1, 0xf4, + 0x50, 0xb2, 0x25, 0x55, 0x58, 0x6a, 0xf8, 0xec, 0x11, 0xf2, 0x4e, 0xe0, 0x63, 0xcd, 0xf7, 0xe3, + 0x76, 0xa4, 0x35, 0x27, 0xda, 0x6d, 0xa0, 0x8d, 0xb8, 0x40, 0x74, 0x8d, 0xee, 0x48, 0xc9, 0x36, + 0xa8, 0x08, 0xfc, 0x5a, 0x5b, 0x36, 0xed, 0x45, 0x2d, 0xec, 0x00, 0x8b, 0x33, 0x0f, 0xb3, 0xaa, + 0x44, 0x7b, 0x3d, 0xe4, 0xfc, 0x6c, 0xc1, 0x82, 0x02, 0x36, 0x39, 0x52, 0x89, 0x1e, 0x3e, 0x6d, + 0xa3, 0x90, 0xe4, 0x93, 0x5c, 0xd5, 0xce, 0x54, 0x77, 0xfe, 0xdd, 0x38, 0xf1, 0xd2, 0xae, 0x4c, + 0xea, 0xff, 0x1a, 0x14, 0xda, 0x4c, 0x20, 0x97, 0x49, 0x97, 0x25, 0x3b, 0x55, 0x1b, 0x3e, 0xc7, + 0xba, 0xd8, 0x8f, 0xc2, 0xae, 0x2e, 0xfe, 0x29, 0x2f, 0x03, 0x9c, 0xa7, 0x86, 0xe8, 0x01, 0xab, + 0x5f, 0x16, 0xd1, 0xea, 0xdf, 0xf3, 0x26, 0xa7, 0x01, 0x13, 0xf1, 0xc9, 0xd7, 0x16, 0x5c, 0xd9, + 0x0b, 0x84, 0x24, 0xff, 0xcb, 0x0f, 0x9c, 0x74, 0xbc, 0x94, 0xf6, 0x2e, 0x8a, 0x85, 0x4a, 0xe2, + 0xdc, 0xfc, 0xe2, 0xf7, 0x3f, 0xbf, 0x1d, 0xbb, 0x46, 0x96, 0xf4, 0x67, 0xb5, 0xb3, 0x9e, 0x7d, + 0xc3, 0x02, 0x14, 0x5f, 0x8e, 0x59, 0xe4, 0x2b, 0x0b, 0xc6, 0x1f, 0xe0, 0x50, 0x36, 0x17, 0xa6, + 0x89, 0x73, 0x4b, 0x33, 0xb9, 0x41, 0xae, 0x0f, 0x62, 0x52, 0x79, 0xae, 0x76, 0x2f, 0xc8, 0x77, + 0x16, 0x14, 0x15, 0x6f, 0x2f, 0x67, 0xbb, 0x1c, 0xa1, 0xca, 0xa3, 0x84, 0x22, 0x9f, 0xc2, 0x94, + 0xa1, 0x75, 0x34, 0x94, 
0x4e, 0xb1, 0x1f, 0x3e, 0x12, 0xce, 0xaa, 0x0e, 0xe9, 0x90, 0x95, 0x11, + 0x27, 0xae, 0x70, 0x15, 0xb2, 0x65, 0xc2, 0xab, 0xcf, 0x13, 0x79, 0xe5, 0x64, 0xf8, 0xf4, 0x76, + 0x51, 0x2a, 0x0f, 0x32, 0xa5, 0xbd, 0x78, 0xae, 0x74, 0x54, 0xa5, 0xf8, 0xc6, 0x82, 0xb9, 0x07, + 0x28, 0xb3, 0x7b, 0x00, 0xb9, 0x39, 0x20, 0x72, 0xfe, 0x8e, 0x50, 0x72, 0x86, 0x3b, 0xa4, 0x04, + 0xde, 0xd1, 0x04, 0xde, 0x74, 0xee, 0x0d, 0x26, 0x60, 0xbe, 0xd6, 0x3a, 0xce, 0x81, 0xb7, 0xa7, + 0xa9, 0xd4, 0x4d, 0x84, 0xfb, 0xd6, 0x1a, 0xe9, 0x68, 0x4a, 0x3b, 0x18, 0xb6, 0x36, 0x9b, 0x94, + 0xcb, 0xa1, 0x32, 0x2f, 0xe7, 0xe1, 0xcc, 0x3d, 0x25, 0xe1, 0x6a, 0x12, 0xab, 0xe4, 0xce, 0x28, + 0x15, 0x9a, 0x18, 0xb6, 0x7c, 0x93, 0xe6, 0x7b, 0x0b, 0x0a, 0x66, 0x7a, 0x91, 0x1b, 0x27, 0x33, + 0xf6, 0x4d, 0xb5, 0x0b, 0x6c, 0x85, 0x57, 0x35, 0xc7, 0xb2, 0x33, 0xb0, 0xd6, 0xee, 0xeb, 0xe1, + 0xa1, 0x5a, 0xf3, 0x07, 0x0b, 0x8a, 0x3d, 0x0a, 0xbd, 0x67, 0x2f, 0x8f, 0xa4, 0x73, 0x36, 0x49, + 0xf2, 0x93, 0x05, 0x05, 0x33, 0x51, 0x4f, 0xf3, 0xea, 0x9b, 0xb4, 0x17, 0xc8, 0x6b, 0xdd, 0xbc, + 0xe0, 0xd2, 0x88, 0x32, 0xd7, 0x54, 0x5e, 0x64, 0x42, 0xfe, 0x62, 0x41, 0xb1, 0x47, 0x67, 0xb8, + 0x90, 0xff, 0x15, 0x61, 0xf7, 0xe5, 0x08, 0x13, 0x0a, 0x85, 0x2d, 0x0c, 0x51, 0xe2, 0xb0, 0x16, + 0xb0, 0x4f, 0xc2, 0x69, 0xf1, 0xdf, 0x31, 0x33, 0x76, 0x6d, 0xd4, 0x8c, 0x55, 0x82, 0x34, 0xa1, + 0x68, 0x52, 0xe4, 0xf4, 0x78, 0xe9, 0x64, 0xb7, 0xce, 0x91, 0x8c, 0x3c, 0x87, 0xf9, 0x8f, 0x68, + 0x18, 0x28, 0x65, 0xcd, 0xbd, 0x96, 0x5c, 0x3f, 0x35, 0x49, 0xb2, 0xfb, 0xee, 0x88, 0x6c, 0x55, + 0x9d, 0xed, 0xae, 0x73, 0x7b, 0x54, 0x5f, 0x77, 0x92, 0x54, 0x46, 0xc9, 0x8d, 0xed, 0x5f, 0x8f, + 0x97, 0xad, 0xdf, 0x8e, 0x97, 0xad, 0x3f, 0x8e, 0x97, 0xad, 0x8f, 0xdf, 0x3a, 0xdf, 0x1f, 0xa4, + 0xaf, 0x2f, 0xa6, 0xb9, 0x7f, 0xbd, 0xc3, 0x82, 0xfe, 0xd9, 0x7b, 0xe3, 0x9f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x56, 0xc6, 0x8e, 0x59, 0xd1, 0x0e, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
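// The hunks that follow thread two new RepoAppDetailsQuery fields, SourceIndex (field 4)
// and VersionId (field 5), plus a new RepoQuery field AppProject (field 3), through the
// generated marshal, size, and unmarshal code. A minimal sketch (not part of the patch),
// assuming only the standard protobuf wire format, of why the generated code writes the
// tag bytes 0x20, 0x28, and 0x1a: a field's key byte is (field_number << 3) | wire_type.
// Note that MarshalToSizedBuffer fills the buffer back-to-front, which is why the
// VersionId block (0x28) appears before the SourceIndex block (0x20) in the hunk even
// though field 4 precedes field 5 on the wire.
package main

import "fmt"

const (
	wireVarint = 0 // varint scalars such as the int32 SourceIndex and VersionId
	wireBytes  = 2 // length-delimited payloads such as the string AppProject
)

// tag computes the single-byte protobuf field key for small field numbers.
func tag(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	fmt.Printf("SourceIndex (field 4, varint): 0x%02x\n", tag(4, wireVarint)) // 0x20
	fmt.Printf("VersionId   (field 5, varint): 0x%02x\n", tag(5, wireVarint)) // 0x28
	fmt.Printf("AppProject  (field 3, bytes):  0x%02x\n", tag(3, wireBytes))  // 0x1a
}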
@@ -1476,6 +1505,16 @@ func (m *RepoAppDetailsQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.VersionId != 0 { + i = encodeVarintRepository(dAtA, i, uint64(m.VersionId)) + i-- + dAtA[i] = 0x28 + } + if m.SourceIndex != 0 { + i = encodeVarintRepository(dAtA, i, uint64(m.SourceIndex)) + i-- + dAtA[i] = 0x20 + } if len(m.AppProject) > 0 { i -= len(m.AppProject) copy(dAtA[i:], m.AppProject) @@ -1570,6 +1609,13 @@ func (m *RepoQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.AppProject) > 0 { + i -= len(m.AppProject) + copy(dAtA[i:], m.AppProject) + i = encodeVarintRepository(dAtA, i, uint64(len(m.AppProject))) + i-- + dAtA[i] = 0x1a + } if m.ForceRefresh { i-- if m.ForceRefresh { @@ -1958,6 +2004,12 @@ func (m *RepoAppDetailsQuery) Size() (n int) { if l > 0 { n += 1 + l + sovRepository(uint64(l)) } + if m.SourceIndex != 0 { + n += 1 + sovRepository(uint64(m.SourceIndex)) + } + if m.VersionId != 0 { + n += 1 + sovRepository(uint64(m.VersionId)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1995,6 +2047,10 @@ func (m *RepoQuery) Size() (n int) { if m.ForceRefresh { n += 2 } + l = len(m.AppProject) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -2559,6 +2615,44 @@ func (m *RepoAppDetailsQuery) Unmarshal(dAtA []byte) error { } m.AppProject = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceIndex", wireType) + } + m.SourceIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SourceIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionId", wireType) + } + m.VersionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VersionId |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRepository(dAtA[iNdEx:]) @@ -2747,6 +2841,38 @@ func (m *RepoQuery) Unmarshal(dAtA []byte) error { } } m.ForceRefresh = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppProject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppProject = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRepository(dAtA[iNdEx:]) diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/app_project_types.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/app_project_types.go index 81f95ab624..5baa9ce165 100644 --- 
a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/app_project_types.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/app_project_types.go @@ -112,7 +112,6 @@ func (p *AppProject) GetJWTToken(roleName string, issuedAt int64, id string) (*J return &token, i, nil } } - } if issuedAt != -1 { @@ -144,10 +143,10 @@ func (p AppProject) RemoveJWTToken(roleIndex int, issuedAt int64, id string) err } if err1 == nil || err2 == nil { - //If we find this token from either places, we can say there are no error + // If we find this token in either place, we can say there is no error return nil } else { - //If we could not locate this taken from either places, we can return any of the errors + // If we could not locate this token in either place, we can return either of the errors return err2 } } @@ -428,7 +427,7 @@ func (proj AppProject) IsDestinationPermitted(dst ApplicationDestination, projec if destinationMatched && proj.Spec.PermitOnlyProjectScopedClusters { clusters, err := projectClusters(proj.Name) if err != nil { - return false, fmt.Errorf("could not retrieve project clusters: %s", err) + return false, fmt.Errorf("could not retrieve project clusters: %w", err) } for _, cluster := range clusters { @@ -563,5 +562,5 @@ func (p AppProject) IsAppNamespacePermitted(app *Application, controllerNs strin return true } - return glob.MatchStringInList(p.Spec.SourceNamespaces, app.Namespace, false) + return glob.MatchStringInList(p.Spec.SourceNamespaces, app.Namespace, glob.REGEXP) } diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/application_defaults.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/application_defaults.go index 2bc9b1bd0d..ad8112af8c 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/application_defaults.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/application_defaults.go @@ -9,6 +9,9 @@ const ( // ResourcesFinalizerName is the finalizer value which we inject to finalize deletion of an application ResourcesFinalizerName string = "resources-finalizer.argocd.argoproj.io" + // PostDeleteFinalizerName is the finalizer that controls post-delete hooks execution + PostDeleteFinalizerName string = "post-delete-finalizer.argocd.argoproj.io" + // ForegroundPropagationPolicyFinalizer is the finalizer we inject to delete application with foreground propagation policy ForegroundPropagationPolicyFinalizer string = "resources-finalizer.argocd.argoproj.io/foreground" diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go index 7a7cc971f9..6c2b629dfd 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go @@ -65,6 +65,7 @@ type ApplicationSetSpec struct { // ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators ApplyNestedSelectors bool `json:"applyNestedSelectors,omitempty" protobuf:"bytes,8,name=applyNestedSelectors"` IgnoreApplicationDifferences ApplicationSetIgnoreDifferences `json:"ignoreApplicationDifferences,omitempty"
protobuf:"bytes,9,name=ignoreApplicationDifferences"` + TemplatePatch *string `json:"templatePatch,omitempty" protobuf:"bytes,10,name=templatePatch"` } type ApplicationPreservedFields struct { @@ -229,7 +230,7 @@ type ApplicationSetTerminalGenerator struct { SCMProvider *SCMProviderGenerator `json:"scmProvider,omitempty" protobuf:"bytes,4,name=scmProvider"` ClusterDecisionResource *DuckTypeGenerator `json:"clusterDecisionResource,omitempty" protobuf:"bytes,5,name=clusterDecisionResource"` PullRequest *PullRequestGenerator `json:"pullRequest,omitempty" protobuf:"bytes,6,name=pullRequest"` - Plugin *PluginGenerator `json:"plugin,omitempty" protobuf:"bytes,7,name=pullRequest"` + Plugin *PluginGenerator `json:"plugin,omitempty" protobuf:"bytes,7,name=plugin"` // Selector allows to post-filter all generator. Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,8,name=selector"` @@ -433,6 +434,22 @@ type SCMProviderGenerator struct { // Values contains key/value pairs which are passed directly as parameters to the template Values map[string]string `json:"values,omitempty" protobuf:"bytes,11,name=values"` AWSCodeCommit *SCMProviderGeneratorAWSCodeCommit `json:"awsCodeCommit,omitempty" protobuf:"bytes,12,opt,name=awsCodeCommit"` + // If you add a new SCM provider, update CustomApiUrl below. +} + +func (g *SCMProviderGenerator) CustomApiUrl() string { + if g.Github != nil { + return g.Github.API + } else if g.Gitlab != nil { + return g.Gitlab.API + } else if g.Gitea != nil { + return g.Gitea.API + } else if g.BitbucketServer != nil { + return g.BitbucketServer.API + } else if g.AzureDevOps != nil { + return g.AzureDevOps.API + } + return "" } // SCMProviderGeneratorGitea defines a connection info specific to Gitea. @@ -575,6 +592,29 @@ type PullRequestGenerator struct { Bitbucket *PullRequestGeneratorBitbucket `json:"bitbucket,omitempty" protobuf:"bytes,8,opt,name=bitbucket"` // Additional provider to use and config for it. AzureDevOps *PullRequestGeneratorAzureDevOps `json:"azuredevops,omitempty" protobuf:"bytes,9,opt,name=azuredevops"` + // If you add a new SCM provider, update CustomApiUrl below. +} + +func (p *PullRequestGenerator) CustomApiUrl() string { + if p.Github != nil { + return p.Github.API + } + if p.GitLab != nil { + return p.GitLab.API + } + if p.Gitea != nil { + return p.Gitea.API + } + if p.BitbucketServer != nil { + return p.BitbucketServer.API + } + if p.Bitbucket != nil { + return p.Bitbucket.API + } + if p.AzureDevOps != nil { + return p.AzureDevOps.API + } + return "" } // PullRequestGeneratorGitea defines connection info specific to Gitea. @@ -719,9 +759,11 @@ type ApplicationSetStatus struct { // Important: Run "make" to regenerate code after modifying this file Conditions []ApplicationSetCondition `json:"conditions,omitempty" protobuf:"bytes,1,name=conditions"` ApplicationStatus []ApplicationSetApplicationStatus `json:"applicationStatus,omitempty" protobuf:"bytes,2,name=applicationStatus"` + // Resources is a list of Applications resources managed by this application set. 
+ Resources []ResourceStatus `json:"resources,omitempty" protobuf:"bytes,3,opt,name=resources"` } -// ApplicationSetCondition contains details about an applicationset condition, which is usally an error or warning +// ApplicationSetCondition contains details about an applicationset condition, which is usually an error or warning type ApplicationSetCondition struct { // Type is an applicationset condition type Type ApplicationSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type"` @@ -731,7 +773,7 @@ type ApplicationSetCondition struct { LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` // True/False/Unknown Status ApplicationSetConditionStatus `json:"status" protobuf:"bytes,4,opt,name=status"` - //Single word camelcase representing the reason for the status eg ErrorOccurred + // Single word camelcase representing the reason for the status, e.g. ErrorOccurred Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"` } @@ -793,6 +835,8 @@ type ApplicationSetApplicationStatus struct { Status string `json:"status" protobuf:"bytes,4,opt,name=status"` // Step tracks which step this Application should be updated in Step string `json:"step" protobuf:"bytes,5,opt,name=step"` + // TargetRevisions tracks the desired revisions the Application should be synced to. + TargetRevisions []string `json:"targetRevisions" protobuf:"bytes,6,opt,name=targetrevisions"` } // ApplicationSetList contains a list of ApplicationSet @@ -804,6 +848,21 @@ type ApplicationSetList struct { Items []ApplicationSet `json:"items" protobuf:"bytes,2,rep,name=items"` } +// ApplicationSetTree holds nodes which belong to the application. +// Used to build a tree of an ApplicationSet and its children. +type ApplicationSetTree struct { + // Nodes contains the list of nodes which are directly managed by the applicationset + Nodes []ResourceNode `json:"nodes,omitempty" protobuf:"bytes,1,rep,name=nodes"` +} + +// Normalize sorts applicationset tree nodes. The persistent order makes it possible to +// effectively compare a previously cached app tree and to skip unnecessary Redis requests.
+func (t *ApplicationSetTree) Normalize() { + sort.Slice(t.Nodes, func(i, j int) bool { + return t.Nodes[i].FullName() < t.Nodes[j].FullName() + }) +} + // func init() { // SchemeBuilder.Register(&ApplicationSet{}, &ApplicationSetList{}) // } diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go index 13d8d44466..fa80e6b820 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go @@ -797,10 +797,38 @@ func (m *ApplicationSetTerminalGenerator) XXX_DiscardUnknown() { var xxx_messageInfo_ApplicationSetTerminalGenerator proto.InternalMessageInfo +func (m *ApplicationSetTree) Reset() { *m = ApplicationSetTree{} } +func (*ApplicationSetTree) ProtoMessage() {} +func (*ApplicationSetTree) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{27} +} +func (m *ApplicationSetTree) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ApplicationSetTree) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ApplicationSetTree) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplicationSetTree.Merge(m, src) +} +func (m *ApplicationSetTree) XXX_Size() int { + return m.Size() +} +func (m *ApplicationSetTree) XXX_DiscardUnknown() { + xxx_messageInfo_ApplicationSetTree.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplicationSetTree proto.InternalMessageInfo + func (m *ApplicationSource) Reset() { *m = ApplicationSource{} } func (*ApplicationSource) ProtoMessage() {} func (*ApplicationSource) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{27} + return fileDescriptor_030104ce3b95bcac, []int{28} } func (m *ApplicationSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -828,7 +856,7 @@ var xxx_messageInfo_ApplicationSource proto.InternalMessageInfo func (m *ApplicationSourceDirectory) Reset() { *m = ApplicationSourceDirectory{} } func (*ApplicationSourceDirectory) ProtoMessage() {} func (*ApplicationSourceDirectory) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{28} + return fileDescriptor_030104ce3b95bcac, []int{29} } func (m *ApplicationSourceDirectory) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -856,7 +884,7 @@ var xxx_messageInfo_ApplicationSourceDirectory proto.InternalMessageInfo func (m *ApplicationSourceHelm) Reset() { *m = ApplicationSourceHelm{} } func (*ApplicationSourceHelm) ProtoMessage() {} func (*ApplicationSourceHelm) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{29} + return fileDescriptor_030104ce3b95bcac, []int{30} } func (m *ApplicationSourceHelm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -884,7 +912,7 @@ var xxx_messageInfo_ApplicationSourceHelm proto.InternalMessageInfo func (m *ApplicationSourceJsonnet) Reset() { *m = ApplicationSourceJsonnet{} } func (*ApplicationSourceJsonnet) ProtoMessage() {} func (*ApplicationSourceJsonnet) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{30} + return fileDescriptor_030104ce3b95bcac, []int{31} } func (m *ApplicationSourceJsonnet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -912,7 
+940,7 @@ var xxx_messageInfo_ApplicationSourceJsonnet proto.InternalMessageInfo func (m *ApplicationSourceKustomize) Reset() { *m = ApplicationSourceKustomize{} } func (*ApplicationSourceKustomize) ProtoMessage() {} func (*ApplicationSourceKustomize) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{31} + return fileDescriptor_030104ce3b95bcac, []int{32} } func (m *ApplicationSourceKustomize) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -940,7 +968,7 @@ var xxx_messageInfo_ApplicationSourceKustomize proto.InternalMessageInfo func (m *ApplicationSourcePlugin) Reset() { *m = ApplicationSourcePlugin{} } func (*ApplicationSourcePlugin) ProtoMessage() {} func (*ApplicationSourcePlugin) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{32} + return fileDescriptor_030104ce3b95bcac, []int{33} } func (m *ApplicationSourcePlugin) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -968,7 +996,7 @@ var xxx_messageInfo_ApplicationSourcePlugin proto.InternalMessageInfo func (m *ApplicationSourcePluginParameter) Reset() { *m = ApplicationSourcePluginParameter{} } func (*ApplicationSourcePluginParameter) ProtoMessage() {} func (*ApplicationSourcePluginParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{33} + return fileDescriptor_030104ce3b95bcac, []int{34} } func (m *ApplicationSourcePluginParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -996,7 +1024,7 @@ var xxx_messageInfo_ApplicationSourcePluginParameter proto.InternalMessageInfo func (m *ApplicationSpec) Reset() { *m = ApplicationSpec{} } func (*ApplicationSpec) ProtoMessage() {} func (*ApplicationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{34} + return fileDescriptor_030104ce3b95bcac, []int{35} } func (m *ApplicationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1024,7 +1052,7 @@ var xxx_messageInfo_ApplicationSpec proto.InternalMessageInfo func (m *ApplicationStatus) Reset() { *m = ApplicationStatus{} } func (*ApplicationStatus) ProtoMessage() {} func (*ApplicationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{35} + return fileDescriptor_030104ce3b95bcac, []int{36} } func (m *ApplicationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1052,7 +1080,7 @@ var xxx_messageInfo_ApplicationStatus proto.InternalMessageInfo func (m *ApplicationSummary) Reset() { *m = ApplicationSummary{} } func (*ApplicationSummary) ProtoMessage() {} func (*ApplicationSummary) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{36} + return fileDescriptor_030104ce3b95bcac, []int{37} } func (m *ApplicationSummary) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1080,7 +1108,7 @@ var xxx_messageInfo_ApplicationSummary proto.InternalMessageInfo func (m *ApplicationTree) Reset() { *m = ApplicationTree{} } func (*ApplicationTree) ProtoMessage() {} func (*ApplicationTree) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{37} + return fileDescriptor_030104ce3b95bcac, []int{38} } func (m *ApplicationTree) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1108,7 +1136,7 @@ var xxx_messageInfo_ApplicationTree proto.InternalMessageInfo func (m *ApplicationWatchEvent) Reset() { *m = ApplicationWatchEvent{} } func (*ApplicationWatchEvent) ProtoMessage() {} func (*ApplicationWatchEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{38} + 
return fileDescriptor_030104ce3b95bcac, []int{39} } func (m *ApplicationWatchEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1136,7 +1164,7 @@ var xxx_messageInfo_ApplicationWatchEvent proto.InternalMessageInfo func (m *Backoff) Reset() { *m = Backoff{} } func (*Backoff) ProtoMessage() {} func (*Backoff) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{39} + return fileDescriptor_030104ce3b95bcac, []int{40} } func (m *Backoff) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1164,7 +1192,7 @@ var xxx_messageInfo_Backoff proto.InternalMessageInfo func (m *BasicAuthBitbucketServer) Reset() { *m = BasicAuthBitbucketServer{} } func (*BasicAuthBitbucketServer) ProtoMessage() {} func (*BasicAuthBitbucketServer) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{40} + return fileDescriptor_030104ce3b95bcac, []int{41} } func (m *BasicAuthBitbucketServer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1192,7 +1220,7 @@ var xxx_messageInfo_BasicAuthBitbucketServer proto.InternalMessageInfo func (m *BearerTokenBitbucketCloud) Reset() { *m = BearerTokenBitbucketCloud{} } func (*BearerTokenBitbucketCloud) ProtoMessage() {} func (*BearerTokenBitbucketCloud) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{41} + return fileDescriptor_030104ce3b95bcac, []int{42} } func (m *BearerTokenBitbucketCloud) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1220,7 +1248,7 @@ var xxx_messageInfo_BearerTokenBitbucketCloud proto.InternalMessageInfo func (m *ChartDetails) Reset() { *m = ChartDetails{} } func (*ChartDetails) ProtoMessage() {} func (*ChartDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{42} + return fileDescriptor_030104ce3b95bcac, []int{43} } func (m *ChartDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1248,7 +1276,7 @@ var xxx_messageInfo_ChartDetails proto.InternalMessageInfo func (m *Cluster) Reset() { *m = Cluster{} } func (*Cluster) ProtoMessage() {} func (*Cluster) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{43} + return fileDescriptor_030104ce3b95bcac, []int{44} } func (m *Cluster) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1276,7 +1304,7 @@ var xxx_messageInfo_Cluster proto.InternalMessageInfo func (m *ClusterCacheInfo) Reset() { *m = ClusterCacheInfo{} } func (*ClusterCacheInfo) ProtoMessage() {} func (*ClusterCacheInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{44} + return fileDescriptor_030104ce3b95bcac, []int{45} } func (m *ClusterCacheInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1304,7 +1332,7 @@ var xxx_messageInfo_ClusterCacheInfo proto.InternalMessageInfo func (m *ClusterConfig) Reset() { *m = ClusterConfig{} } func (*ClusterConfig) ProtoMessage() {} func (*ClusterConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{45} + return fileDescriptor_030104ce3b95bcac, []int{46} } func (m *ClusterConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1332,7 +1360,7 @@ var xxx_messageInfo_ClusterConfig proto.InternalMessageInfo func (m *ClusterGenerator) Reset() { *m = ClusterGenerator{} } func (*ClusterGenerator) ProtoMessage() {} func (*ClusterGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{46} + return fileDescriptor_030104ce3b95bcac, []int{47} } func (m *ClusterGenerator) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -1360,7 +1388,7 @@ var xxx_messageInfo_ClusterGenerator proto.InternalMessageInfo func (m *ClusterInfo) Reset() { *m = ClusterInfo{} } func (*ClusterInfo) ProtoMessage() {} func (*ClusterInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{47} + return fileDescriptor_030104ce3b95bcac, []int{48} } func (m *ClusterInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1388,7 +1416,7 @@ var xxx_messageInfo_ClusterInfo proto.InternalMessageInfo func (m *ClusterList) Reset() { *m = ClusterList{} } func (*ClusterList) ProtoMessage() {} func (*ClusterList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{48} + return fileDescriptor_030104ce3b95bcac, []int{49} } func (m *ClusterList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1416,7 +1444,7 @@ var xxx_messageInfo_ClusterList proto.InternalMessageInfo func (m *Command) Reset() { *m = Command{} } func (*Command) ProtoMessage() {} func (*Command) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{49} + return fileDescriptor_030104ce3b95bcac, []int{50} } func (m *Command) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1444,7 +1472,7 @@ var xxx_messageInfo_Command proto.InternalMessageInfo func (m *ComparedTo) Reset() { *m = ComparedTo{} } func (*ComparedTo) ProtoMessage() {} func (*ComparedTo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{50} + return fileDescriptor_030104ce3b95bcac, []int{51} } func (m *ComparedTo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1472,7 +1500,7 @@ var xxx_messageInfo_ComparedTo proto.InternalMessageInfo func (m *ComponentParameter) Reset() { *m = ComponentParameter{} } func (*ComponentParameter) ProtoMessage() {} func (*ComponentParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{51} + return fileDescriptor_030104ce3b95bcac, []int{52} } func (m *ComponentParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1500,7 +1528,7 @@ var xxx_messageInfo_ComponentParameter proto.InternalMessageInfo func (m *ConfigManagementPlugin) Reset() { *m = ConfigManagementPlugin{} } func (*ConfigManagementPlugin) ProtoMessage() {} func (*ConfigManagementPlugin) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{52} + return fileDescriptor_030104ce3b95bcac, []int{53} } func (m *ConfigManagementPlugin) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1528,7 +1556,7 @@ var xxx_messageInfo_ConfigManagementPlugin proto.InternalMessageInfo func (m *ConnectionState) Reset() { *m = ConnectionState{} } func (*ConnectionState) ProtoMessage() {} func (*ConnectionState) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{53} + return fileDescriptor_030104ce3b95bcac, []int{54} } func (m *ConnectionState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1556,7 +1584,7 @@ var xxx_messageInfo_ConnectionState proto.InternalMessageInfo func (m *DuckTypeGenerator) Reset() { *m = DuckTypeGenerator{} } func (*DuckTypeGenerator) ProtoMessage() {} func (*DuckTypeGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{54} + return fileDescriptor_030104ce3b95bcac, []int{55} } func (m *DuckTypeGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1584,7 +1612,7 @@ var xxx_messageInfo_DuckTypeGenerator proto.InternalMessageInfo func (m *EnvEntry) Reset() { *m = EnvEntry{} } func (*EnvEntry) ProtoMessage() {} 
func (*EnvEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{55} + return fileDescriptor_030104ce3b95bcac, []int{56} } func (m *EnvEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1612,7 +1640,7 @@ var xxx_messageInfo_EnvEntry proto.InternalMessageInfo func (m *ErrApplicationNotAllowedToUseProject) Reset() { *m = ErrApplicationNotAllowedToUseProject{} } func (*ErrApplicationNotAllowedToUseProject) ProtoMessage() {} func (*ErrApplicationNotAllowedToUseProject) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{56} + return fileDescriptor_030104ce3b95bcac, []int{57} } func (m *ErrApplicationNotAllowedToUseProject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1640,7 +1668,7 @@ var xxx_messageInfo_ErrApplicationNotAllowedToUseProject proto.InternalMessageIn func (m *ExecProviderConfig) Reset() { *m = ExecProviderConfig{} } func (*ExecProviderConfig) ProtoMessage() {} func (*ExecProviderConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{57} + return fileDescriptor_030104ce3b95bcac, []int{58} } func (m *ExecProviderConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1668,7 +1696,7 @@ var xxx_messageInfo_ExecProviderConfig proto.InternalMessageInfo func (m *GitDirectoryGeneratorItem) Reset() { *m = GitDirectoryGeneratorItem{} } func (*GitDirectoryGeneratorItem) ProtoMessage() {} func (*GitDirectoryGeneratorItem) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{58} + return fileDescriptor_030104ce3b95bcac, []int{59} } func (m *GitDirectoryGeneratorItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1696,7 +1724,7 @@ var xxx_messageInfo_GitDirectoryGeneratorItem proto.InternalMessageInfo func (m *GitFileGeneratorItem) Reset() { *m = GitFileGeneratorItem{} } func (*GitFileGeneratorItem) ProtoMessage() {} func (*GitFileGeneratorItem) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{59} + return fileDescriptor_030104ce3b95bcac, []int{60} } func (m *GitFileGeneratorItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1724,7 +1752,7 @@ var xxx_messageInfo_GitFileGeneratorItem proto.InternalMessageInfo func (m *GitGenerator) Reset() { *m = GitGenerator{} } func (*GitGenerator) ProtoMessage() {} func (*GitGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{60} + return fileDescriptor_030104ce3b95bcac, []int{61} } func (m *GitGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1752,7 +1780,7 @@ var xxx_messageInfo_GitGenerator proto.InternalMessageInfo func (m *GnuPGPublicKey) Reset() { *m = GnuPGPublicKey{} } func (*GnuPGPublicKey) ProtoMessage() {} func (*GnuPGPublicKey) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{61} + return fileDescriptor_030104ce3b95bcac, []int{62} } func (m *GnuPGPublicKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1780,7 +1808,7 @@ var xxx_messageInfo_GnuPGPublicKey proto.InternalMessageInfo func (m *GnuPGPublicKeyList) Reset() { *m = GnuPGPublicKeyList{} } func (*GnuPGPublicKeyList) ProtoMessage() {} func (*GnuPGPublicKeyList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{62} + return fileDescriptor_030104ce3b95bcac, []int{63} } func (m *GnuPGPublicKeyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1808,7 +1836,7 @@ var xxx_messageInfo_GnuPGPublicKeyList proto.InternalMessageInfo func (m *HealthStatus) 
Reset() { *m = HealthStatus{} } func (*HealthStatus) ProtoMessage() {} func (*HealthStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{63} + return fileDescriptor_030104ce3b95bcac, []int{64} } func (m *HealthStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1836,7 +1864,7 @@ var xxx_messageInfo_HealthStatus proto.InternalMessageInfo func (m *HelmFileParameter) Reset() { *m = HelmFileParameter{} } func (*HelmFileParameter) ProtoMessage() {} func (*HelmFileParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{64} + return fileDescriptor_030104ce3b95bcac, []int{65} } func (m *HelmFileParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1864,7 +1892,7 @@ var xxx_messageInfo_HelmFileParameter proto.InternalMessageInfo func (m *HelmOptions) Reset() { *m = HelmOptions{} } func (*HelmOptions) ProtoMessage() {} func (*HelmOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{65} + return fileDescriptor_030104ce3b95bcac, []int{66} } func (m *HelmOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1892,7 +1920,7 @@ var xxx_messageInfo_HelmOptions proto.InternalMessageInfo func (m *HelmParameter) Reset() { *m = HelmParameter{} } func (*HelmParameter) ProtoMessage() {} func (*HelmParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{66} + return fileDescriptor_030104ce3b95bcac, []int{67} } func (m *HelmParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1920,7 +1948,7 @@ var xxx_messageInfo_HelmParameter proto.InternalMessageInfo func (m *HostInfo) Reset() { *m = HostInfo{} } func (*HostInfo) ProtoMessage() {} func (*HostInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{67} + return fileDescriptor_030104ce3b95bcac, []int{68} } func (m *HostInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1948,7 +1976,7 @@ var xxx_messageInfo_HostInfo proto.InternalMessageInfo func (m *HostResourceInfo) Reset() { *m = HostResourceInfo{} } func (*HostResourceInfo) ProtoMessage() {} func (*HostResourceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{68} + return fileDescriptor_030104ce3b95bcac, []int{69} } func (m *HostResourceInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1976,7 +2004,7 @@ var xxx_messageInfo_HostResourceInfo proto.InternalMessageInfo func (m *Info) Reset() { *m = Info{} } func (*Info) ProtoMessage() {} func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{69} + return fileDescriptor_030104ce3b95bcac, []int{70} } func (m *Info) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2004,7 +2032,7 @@ var xxx_messageInfo_Info proto.InternalMessageInfo func (m *InfoItem) Reset() { *m = InfoItem{} } func (*InfoItem) ProtoMessage() {} func (*InfoItem) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{70} + return fileDescriptor_030104ce3b95bcac, []int{71} } func (m *InfoItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2032,7 +2060,7 @@ var xxx_messageInfo_InfoItem proto.InternalMessageInfo func (m *JWTToken) Reset() { *m = JWTToken{} } func (*JWTToken) ProtoMessage() {} func (*JWTToken) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{71} + return fileDescriptor_030104ce3b95bcac, []int{72} } func (m *JWTToken) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2060,7 +2088,7 @@ var 
xxx_messageInfo_JWTToken proto.InternalMessageInfo func (m *JWTTokens) Reset() { *m = JWTTokens{} } func (*JWTTokens) ProtoMessage() {} func (*JWTTokens) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{72} + return fileDescriptor_030104ce3b95bcac, []int{73} } func (m *JWTTokens) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2088,7 +2116,7 @@ var xxx_messageInfo_JWTTokens proto.InternalMessageInfo func (m *JsonnetVar) Reset() { *m = JsonnetVar{} } func (*JsonnetVar) ProtoMessage() {} func (*JsonnetVar) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{73} + return fileDescriptor_030104ce3b95bcac, []int{74} } func (m *JsonnetVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2116,7 +2144,7 @@ var xxx_messageInfo_JsonnetVar proto.InternalMessageInfo func (m *KnownTypeField) Reset() { *m = KnownTypeField{} } func (*KnownTypeField) ProtoMessage() {} func (*KnownTypeField) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{74} + return fileDescriptor_030104ce3b95bcac, []int{75} } func (m *KnownTypeField) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2144,7 +2172,7 @@ var xxx_messageInfo_KnownTypeField proto.InternalMessageInfo func (m *KustomizeGvk) Reset() { *m = KustomizeGvk{} } func (*KustomizeGvk) ProtoMessage() {} func (*KustomizeGvk) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{75} + return fileDescriptor_030104ce3b95bcac, []int{76} } func (m *KustomizeGvk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2172,7 +2200,7 @@ var xxx_messageInfo_KustomizeGvk proto.InternalMessageInfo func (m *KustomizeOptions) Reset() { *m = KustomizeOptions{} } func (*KustomizeOptions) ProtoMessage() {} func (*KustomizeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{76} + return fileDescriptor_030104ce3b95bcac, []int{77} } func (m *KustomizeOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2200,7 +2228,7 @@ var xxx_messageInfo_KustomizeOptions proto.InternalMessageInfo func (m *KustomizePatch) Reset() { *m = KustomizePatch{} } func (*KustomizePatch) ProtoMessage() {} func (*KustomizePatch) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{77} + return fileDescriptor_030104ce3b95bcac, []int{78} } func (m *KustomizePatch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2228,7 +2256,7 @@ var xxx_messageInfo_KustomizePatch proto.InternalMessageInfo func (m *KustomizeReplica) Reset() { *m = KustomizeReplica{} } func (*KustomizeReplica) ProtoMessage() {} func (*KustomizeReplica) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{78} + return fileDescriptor_030104ce3b95bcac, []int{79} } func (m *KustomizeReplica) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2256,7 +2284,7 @@ var xxx_messageInfo_KustomizeReplica proto.InternalMessageInfo func (m *KustomizeResId) Reset() { *m = KustomizeResId{} } func (*KustomizeResId) ProtoMessage() {} func (*KustomizeResId) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{79} + return fileDescriptor_030104ce3b95bcac, []int{80} } func (m *KustomizeResId) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2284,7 +2312,7 @@ var xxx_messageInfo_KustomizeResId proto.InternalMessageInfo func (m *KustomizeSelector) Reset() { *m = KustomizeSelector{} } func (*KustomizeSelector) ProtoMessage() {} func (*KustomizeSelector) Descriptor() ([]byte, []int) { - 
return fileDescriptor_030104ce3b95bcac, []int{80} + return fileDescriptor_030104ce3b95bcac, []int{81} } func (m *KustomizeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2312,7 +2340,7 @@ var xxx_messageInfo_KustomizeSelector proto.InternalMessageInfo func (m *ListGenerator) Reset() { *m = ListGenerator{} } func (*ListGenerator) ProtoMessage() {} func (*ListGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{81} + return fileDescriptor_030104ce3b95bcac, []int{82} } func (m *ListGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2340,7 +2368,7 @@ var xxx_messageInfo_ListGenerator proto.InternalMessageInfo func (m *ManagedNamespaceMetadata) Reset() { *m = ManagedNamespaceMetadata{} } func (*ManagedNamespaceMetadata) ProtoMessage() {} func (*ManagedNamespaceMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{82} + return fileDescriptor_030104ce3b95bcac, []int{83} } func (m *ManagedNamespaceMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2368,7 +2396,7 @@ var xxx_messageInfo_ManagedNamespaceMetadata proto.InternalMessageInfo func (m *MatrixGenerator) Reset() { *m = MatrixGenerator{} } func (*MatrixGenerator) ProtoMessage() {} func (*MatrixGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{83} + return fileDescriptor_030104ce3b95bcac, []int{84} } func (m *MatrixGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2396,7 +2424,7 @@ var xxx_messageInfo_MatrixGenerator proto.InternalMessageInfo func (m *MergeGenerator) Reset() { *m = MergeGenerator{} } func (*MergeGenerator) ProtoMessage() {} func (*MergeGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{84} + return fileDescriptor_030104ce3b95bcac, []int{85} } func (m *MergeGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2424,7 +2452,7 @@ var xxx_messageInfo_MergeGenerator proto.InternalMessageInfo func (m *NestedMatrixGenerator) Reset() { *m = NestedMatrixGenerator{} } func (*NestedMatrixGenerator) ProtoMessage() {} func (*NestedMatrixGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{85} + return fileDescriptor_030104ce3b95bcac, []int{86} } func (m *NestedMatrixGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2452,7 +2480,7 @@ var xxx_messageInfo_NestedMatrixGenerator proto.InternalMessageInfo func (m *NestedMergeGenerator) Reset() { *m = NestedMergeGenerator{} } func (*NestedMergeGenerator) ProtoMessage() {} func (*NestedMergeGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{86} + return fileDescriptor_030104ce3b95bcac, []int{87} } func (m *NestedMergeGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2480,7 +2508,7 @@ var xxx_messageInfo_NestedMergeGenerator proto.InternalMessageInfo func (m *Operation) Reset() { *m = Operation{} } func (*Operation) ProtoMessage() {} func (*Operation) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{87} + return fileDescriptor_030104ce3b95bcac, []int{88} } func (m *Operation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2508,7 +2536,7 @@ var xxx_messageInfo_Operation proto.InternalMessageInfo func (m *OperationInitiator) Reset() { *m = OperationInitiator{} } func (*OperationInitiator) ProtoMessage() {} func (*OperationInitiator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, 
[]int{88} + return fileDescriptor_030104ce3b95bcac, []int{89} } func (m *OperationInitiator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2536,7 +2564,7 @@ var xxx_messageInfo_OperationInitiator proto.InternalMessageInfo func (m *OperationState) Reset() { *m = OperationState{} } func (*OperationState) ProtoMessage() {} func (*OperationState) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{89} + return fileDescriptor_030104ce3b95bcac, []int{90} } func (m *OperationState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2564,7 +2592,7 @@ var xxx_messageInfo_OperationState proto.InternalMessageInfo func (m *OptionalArray) Reset() { *m = OptionalArray{} } func (*OptionalArray) ProtoMessage() {} func (*OptionalArray) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{90} + return fileDescriptor_030104ce3b95bcac, []int{91} } func (m *OptionalArray) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2592,7 +2620,7 @@ var xxx_messageInfo_OptionalArray proto.InternalMessageInfo func (m *OptionalMap) Reset() { *m = OptionalMap{} } func (*OptionalMap) ProtoMessage() {} func (*OptionalMap) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{91} + return fileDescriptor_030104ce3b95bcac, []int{92} } func (m *OptionalMap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2620,7 +2648,7 @@ var xxx_messageInfo_OptionalMap proto.InternalMessageInfo func (m *OrphanedResourceKey) Reset() { *m = OrphanedResourceKey{} } func (*OrphanedResourceKey) ProtoMessage() {} func (*OrphanedResourceKey) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{92} + return fileDescriptor_030104ce3b95bcac, []int{93} } func (m *OrphanedResourceKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2648,7 +2676,7 @@ var xxx_messageInfo_OrphanedResourceKey proto.InternalMessageInfo func (m *OrphanedResourcesMonitorSettings) Reset() { *m = OrphanedResourcesMonitorSettings{} } func (*OrphanedResourcesMonitorSettings) ProtoMessage() {} func (*OrphanedResourcesMonitorSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{93} + return fileDescriptor_030104ce3b95bcac, []int{94} } func (m *OrphanedResourcesMonitorSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2676,7 +2704,7 @@ var xxx_messageInfo_OrphanedResourcesMonitorSettings proto.InternalMessageInfo func (m *OverrideIgnoreDiff) Reset() { *m = OverrideIgnoreDiff{} } func (*OverrideIgnoreDiff) ProtoMessage() {} func (*OverrideIgnoreDiff) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{94} + return fileDescriptor_030104ce3b95bcac, []int{95} } func (m *OverrideIgnoreDiff) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2704,7 +2732,7 @@ var xxx_messageInfo_OverrideIgnoreDiff proto.InternalMessageInfo func (m *PluginConfigMapRef) Reset() { *m = PluginConfigMapRef{} } func (*PluginConfigMapRef) ProtoMessage() {} func (*PluginConfigMapRef) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{95} + return fileDescriptor_030104ce3b95bcac, []int{96} } func (m *PluginConfigMapRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2732,7 +2760,7 @@ var xxx_messageInfo_PluginConfigMapRef proto.InternalMessageInfo func (m *PluginGenerator) Reset() { *m = PluginGenerator{} } func (*PluginGenerator) ProtoMessage() {} func (*PluginGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, 
[]int{96} + return fileDescriptor_030104ce3b95bcac, []int{97} } func (m *PluginGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2760,7 +2788,7 @@ var xxx_messageInfo_PluginGenerator proto.InternalMessageInfo func (m *PluginInput) Reset() { *m = PluginInput{} } func (*PluginInput) ProtoMessage() {} func (*PluginInput) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{97} + return fileDescriptor_030104ce3b95bcac, []int{98} } func (m *PluginInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2788,7 +2816,7 @@ var xxx_messageInfo_PluginInput proto.InternalMessageInfo func (m *ProjectRole) Reset() { *m = ProjectRole{} } func (*ProjectRole) ProtoMessage() {} func (*ProjectRole) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{98} + return fileDescriptor_030104ce3b95bcac, []int{99} } func (m *ProjectRole) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2816,7 +2844,7 @@ var xxx_messageInfo_ProjectRole proto.InternalMessageInfo func (m *PullRequestGenerator) Reset() { *m = PullRequestGenerator{} } func (*PullRequestGenerator) ProtoMessage() {} func (*PullRequestGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{99} + return fileDescriptor_030104ce3b95bcac, []int{100} } func (m *PullRequestGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2844,7 +2872,7 @@ var xxx_messageInfo_PullRequestGenerator proto.InternalMessageInfo func (m *PullRequestGeneratorAzureDevOps) Reset() { *m = PullRequestGeneratorAzureDevOps{} } func (*PullRequestGeneratorAzureDevOps) ProtoMessage() {} func (*PullRequestGeneratorAzureDevOps) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{100} + return fileDescriptor_030104ce3b95bcac, []int{101} } func (m *PullRequestGeneratorAzureDevOps) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2872,7 +2900,7 @@ var xxx_messageInfo_PullRequestGeneratorAzureDevOps proto.InternalMessageInfo func (m *PullRequestGeneratorBitbucket) Reset() { *m = PullRequestGeneratorBitbucket{} } func (*PullRequestGeneratorBitbucket) ProtoMessage() {} func (*PullRequestGeneratorBitbucket) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{101} + return fileDescriptor_030104ce3b95bcac, []int{102} } func (m *PullRequestGeneratorBitbucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2900,7 +2928,7 @@ var xxx_messageInfo_PullRequestGeneratorBitbucket proto.InternalMessageInfo func (m *PullRequestGeneratorBitbucketServer) Reset() { *m = PullRequestGeneratorBitbucketServer{} } func (*PullRequestGeneratorBitbucketServer) ProtoMessage() {} func (*PullRequestGeneratorBitbucketServer) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{102} + return fileDescriptor_030104ce3b95bcac, []int{103} } func (m *PullRequestGeneratorBitbucketServer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2928,7 +2956,7 @@ var xxx_messageInfo_PullRequestGeneratorBitbucketServer proto.InternalMessageInf func (m *PullRequestGeneratorFilter) Reset() { *m = PullRequestGeneratorFilter{} } func (*PullRequestGeneratorFilter) ProtoMessage() {} func (*PullRequestGeneratorFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{103} + return fileDescriptor_030104ce3b95bcac, []int{104} } func (m *PullRequestGeneratorFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2956,7 +2984,7 @@ var xxx_messageInfo_PullRequestGeneratorFilter 
proto.InternalMessageInfo
 func (m *PullRequestGeneratorGitLab) Reset() { *m = PullRequestGeneratorGitLab{} }
 func (*PullRequestGeneratorGitLab) ProtoMessage() {}
 func (*PullRequestGeneratorGitLab) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{104}
+	return fileDescriptor_030104ce3b95bcac, []int{105}
 }
 func (m *PullRequestGeneratorGitLab) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2984,7 +3012,7 @@ var xxx_messageInfo_PullRequestGeneratorGitLab proto.InternalMessageInfo
 func (m *PullRequestGeneratorGitea) Reset() { *m = PullRequestGeneratorGitea{} }
 func (*PullRequestGeneratorGitea) ProtoMessage() {}
 func (*PullRequestGeneratorGitea) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{105}
+	return fileDescriptor_030104ce3b95bcac, []int{106}
 }
 func (m *PullRequestGeneratorGitea) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3012,7 +3040,7 @@ var xxx_messageInfo_PullRequestGeneratorGitea proto.InternalMessageInfo
 func (m *PullRequestGeneratorGithub) Reset() { *m = PullRequestGeneratorGithub{} }
 func (*PullRequestGeneratorGithub) ProtoMessage() {}
 func (*PullRequestGeneratorGithub) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{106}
+	return fileDescriptor_030104ce3b95bcac, []int{107}
 }
 func (m *PullRequestGeneratorGithub) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3040,7 +3068,7 @@ var xxx_messageInfo_PullRequestGeneratorGithub proto.InternalMessageInfo
 func (m *RefTarget) Reset() { *m = RefTarget{} }
 func (*RefTarget) ProtoMessage() {}
 func (*RefTarget) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{107}
+	return fileDescriptor_030104ce3b95bcac, []int{108}
 }
 func (m *RefTarget) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3068,7 +3096,7 @@ var xxx_messageInfo_RefTarget proto.InternalMessageInfo
 func (m *RepoCreds) Reset() { *m = RepoCreds{} }
 func (*RepoCreds) ProtoMessage() {}
 func (*RepoCreds) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{108}
+	return fileDescriptor_030104ce3b95bcac, []int{109}
 }
 func (m *RepoCreds) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3096,7 +3124,7 @@ var xxx_messageInfo_RepoCreds proto.InternalMessageInfo
 func (m *RepoCredsList) Reset() { *m = RepoCredsList{} }
 func (*RepoCredsList) ProtoMessage() {}
 func (*RepoCredsList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{109}
+	return fileDescriptor_030104ce3b95bcac, []int{110}
 }
 func (m *RepoCredsList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3124,7 +3152,7 @@ var xxx_messageInfo_RepoCredsList proto.InternalMessageInfo
 func (m *Repository) Reset() { *m = Repository{} }
 func (*Repository) ProtoMessage() {}
 func (*Repository) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{110}
+	return fileDescriptor_030104ce3b95bcac, []int{111}
 }
 func (m *Repository) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3152,7 +3180,7 @@ var xxx_messageInfo_Repository proto.InternalMessageInfo
 func (m *RepositoryCertificate) Reset() { *m = RepositoryCertificate{} }
 func (*RepositoryCertificate) ProtoMessage() {}
 func (*RepositoryCertificate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{111}
+	return fileDescriptor_030104ce3b95bcac, []int{112}
 }
 func (m *RepositoryCertificate) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3180,7 +3208,7 @@ var xxx_messageInfo_RepositoryCertificate proto.InternalMessageInfo
 func (m *RepositoryCertificateList) Reset() { *m = RepositoryCertificateList{} }
 func (*RepositoryCertificateList) ProtoMessage() {}
 func (*RepositoryCertificateList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{112}
+	return fileDescriptor_030104ce3b95bcac, []int{113}
 }
 func (m *RepositoryCertificateList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3208,7 +3236,7 @@ var xxx_messageInfo_RepositoryCertificateList proto.InternalMessageInfo
 func (m *RepositoryList) Reset() { *m = RepositoryList{} }
 func (*RepositoryList) ProtoMessage() {}
 func (*RepositoryList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{113}
+	return fileDescriptor_030104ce3b95bcac, []int{114}
 }
 func (m *RepositoryList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3236,7 +3264,7 @@ var xxx_messageInfo_RepositoryList proto.InternalMessageInfo
 func (m *ResourceAction) Reset() { *m = ResourceAction{} }
 func (*ResourceAction) ProtoMessage() {}
 func (*ResourceAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{114}
+	return fileDescriptor_030104ce3b95bcac, []int{115}
 }
 func (m *ResourceAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3264,7 +3292,7 @@ var xxx_messageInfo_ResourceAction proto.InternalMessageInfo
 func (m *ResourceActionDefinition) Reset() { *m = ResourceActionDefinition{} }
 func (*ResourceActionDefinition) ProtoMessage() {}
 func (*ResourceActionDefinition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{115}
+	return fileDescriptor_030104ce3b95bcac, []int{116}
 }
 func (m *ResourceActionDefinition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3292,7 +3320,7 @@ var xxx_messageInfo_ResourceActionDefinition proto.InternalMessageInfo
 func (m *ResourceActionParam) Reset() { *m = ResourceActionParam{} }
 func (*ResourceActionParam) ProtoMessage() {}
 func (*ResourceActionParam) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{116}
+	return fileDescriptor_030104ce3b95bcac, []int{117}
 }
 func (m *ResourceActionParam) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3320,7 +3348,7 @@ var xxx_messageInfo_ResourceActionParam proto.InternalMessageInfo
 func (m *ResourceActions) Reset() { *m = ResourceActions{} }
 func (*ResourceActions) ProtoMessage() {}
 func (*ResourceActions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{117}
+	return fileDescriptor_030104ce3b95bcac, []int{118}
 }
 func (m *ResourceActions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3348,7 +3376,7 @@ var xxx_messageInfo_ResourceActions proto.InternalMessageInfo
 func (m *ResourceDiff) Reset() { *m = ResourceDiff{} }
 func (*ResourceDiff) ProtoMessage() {}
 func (*ResourceDiff) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{118}
+	return fileDescriptor_030104ce3b95bcac, []int{119}
 }
 func (m *ResourceDiff) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3376,7 +3404,7 @@ var xxx_messageInfo_ResourceDiff proto.InternalMessageInfo
 func (m *ResourceIgnoreDifferences) Reset() { *m = ResourceIgnoreDifferences{} }
 func (*ResourceIgnoreDifferences) ProtoMessage() {}
 func (*ResourceIgnoreDifferences) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{119}
+	return fileDescriptor_030104ce3b95bcac, []int{120}
 }
 func (m *ResourceIgnoreDifferences) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3404,7 +3432,7 @@ var xxx_messageInfo_ResourceIgnoreDifferences proto.InternalMessageInfo
 func (m *ResourceNetworkingInfo) Reset() { *m = ResourceNetworkingInfo{} }
 func (*ResourceNetworkingInfo) ProtoMessage() {}
 func (*ResourceNetworkingInfo) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{120}
+	return fileDescriptor_030104ce3b95bcac, []int{121}
 }
 func (m *ResourceNetworkingInfo) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3432,7 +3460,7 @@ var xxx_messageInfo_ResourceNetworkingInfo proto.InternalMessageInfo
 func (m *ResourceNode) Reset() { *m = ResourceNode{} }
 func (*ResourceNode) ProtoMessage() {}
 func (*ResourceNode) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{121}
+	return fileDescriptor_030104ce3b95bcac, []int{122}
 }
 func (m *ResourceNode) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3460,7 +3488,7 @@ var xxx_messageInfo_ResourceNode proto.InternalMessageInfo
 func (m *ResourceOverride) Reset() { *m = ResourceOverride{} }
 func (*ResourceOverride) ProtoMessage() {}
 func (*ResourceOverride) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{122}
+	return fileDescriptor_030104ce3b95bcac, []int{123}
 }
 func (m *ResourceOverride) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3488,7 +3516,7 @@ var xxx_messageInfo_ResourceOverride proto.InternalMessageInfo
 func (m *ResourceRef) Reset() { *m = ResourceRef{} }
 func (*ResourceRef) ProtoMessage() {}
 func (*ResourceRef) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{123}
+	return fileDescriptor_030104ce3b95bcac, []int{124}
 }
 func (m *ResourceRef) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3516,7 +3544,7 @@ var xxx_messageInfo_ResourceRef proto.InternalMessageInfo
 func (m *ResourceResult) Reset() { *m = ResourceResult{} }
 func (*ResourceResult) ProtoMessage() {}
 func (*ResourceResult) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{124}
+	return fileDescriptor_030104ce3b95bcac, []int{125}
 }
 func (m *ResourceResult) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3544,7 +3572,7 @@ var xxx_messageInfo_ResourceResult proto.InternalMessageInfo
 func (m *ResourceStatus) Reset() { *m = ResourceStatus{} }
 func (*ResourceStatus) ProtoMessage() {}
 func (*ResourceStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{125}
+	return fileDescriptor_030104ce3b95bcac, []int{126}
 }
 func (m *ResourceStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3572,7 +3600,7 @@ var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo
 func (m *RetryStrategy) Reset() { *m = RetryStrategy{} }
 func (*RetryStrategy) ProtoMessage() {}
 func (*RetryStrategy) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{126}
+	return fileDescriptor_030104ce3b95bcac, []int{127}
 }
 func (m *RetryStrategy) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3600,7 +3628,7 @@ var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo
 func (m *RevisionHistory) Reset() { *m = RevisionHistory{} }
 func (*RevisionHistory) ProtoMessage() {}
 func (*RevisionHistory) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{127}
+	return fileDescriptor_030104ce3b95bcac, []int{128}
 }
 func (m *RevisionHistory) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3628,7 +3656,7 @@ var xxx_messageInfo_RevisionHistory proto.InternalMessageInfo
 func (m *RevisionMetadata) Reset() { *m = RevisionMetadata{} }
 func (*RevisionMetadata) ProtoMessage() {}
 func (*RevisionMetadata) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{128}
+	return fileDescriptor_030104ce3b95bcac, []int{129}
 }
 func (m *RevisionMetadata) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3656,7 +3684,7 @@ var xxx_messageInfo_RevisionMetadata proto.InternalMessageInfo
 func (m *SCMProviderGenerator) Reset() { *m = SCMProviderGenerator{} }
 func (*SCMProviderGenerator) ProtoMessage() {}
 func (*SCMProviderGenerator) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{129}
+	return fileDescriptor_030104ce3b95bcac, []int{130}
 }
 func (m *SCMProviderGenerator) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3684,7 +3712,7 @@ var xxx_messageInfo_SCMProviderGenerator proto.InternalMessageInfo
 func (m *SCMProviderGeneratorAWSCodeCommit) Reset() { *m = SCMProviderGeneratorAWSCodeCommit{} }
 func (*SCMProviderGeneratorAWSCodeCommit) ProtoMessage() {}
 func (*SCMProviderGeneratorAWSCodeCommit) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{130}
+	return fileDescriptor_030104ce3b95bcac, []int{131}
 }
 func (m *SCMProviderGeneratorAWSCodeCommit) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3712,7 +3740,7 @@ var xxx_messageInfo_SCMProviderGeneratorAWSCodeCommit proto.InternalMessageInfo
 func (m *SCMProviderGeneratorAzureDevOps) Reset() { *m = SCMProviderGeneratorAzureDevOps{} }
 func (*SCMProviderGeneratorAzureDevOps) ProtoMessage() {}
 func (*SCMProviderGeneratorAzureDevOps) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{131}
+	return fileDescriptor_030104ce3b95bcac, []int{132}
 }
 func (m *SCMProviderGeneratorAzureDevOps) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3740,7 +3768,7 @@ var xxx_messageInfo_SCMProviderGeneratorAzureDevOps proto.InternalMessageInfo
 func (m *SCMProviderGeneratorBitbucket) Reset() { *m = SCMProviderGeneratorBitbucket{} }
 func (*SCMProviderGeneratorBitbucket) ProtoMessage() {}
 func (*SCMProviderGeneratorBitbucket) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{132}
+	return fileDescriptor_030104ce3b95bcac, []int{133}
 }
 func (m *SCMProviderGeneratorBitbucket) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3768,7 +3796,7 @@ var xxx_messageInfo_SCMProviderGeneratorBitbucket proto.InternalMessageInfo
 func (m *SCMProviderGeneratorBitbucketServer) Reset() { *m = SCMProviderGeneratorBitbucketServer{} }
 func (*SCMProviderGeneratorBitbucketServer) ProtoMessage() {}
 func (*SCMProviderGeneratorBitbucketServer) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{133}
+	return fileDescriptor_030104ce3b95bcac, []int{134}
 }
 func (m *SCMProviderGeneratorBitbucketServer) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3796,7 +3824,7 @@ var xxx_messageInfo_SCMProviderGeneratorBitbucketServer proto.InternalMessageInf
 func (m *SCMProviderGeneratorFilter) Reset() { *m = SCMProviderGeneratorFilter{} }
 func (*SCMProviderGeneratorFilter) ProtoMessage() {}
 func (*SCMProviderGeneratorFilter) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{134}
+	return fileDescriptor_030104ce3b95bcac, []int{135}
 }
 func (m *SCMProviderGeneratorFilter) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3824,7 +3852,7 @@ var xxx_messageInfo_SCMProviderGeneratorFilter proto.InternalMessageInfo
 func (m *SCMProviderGeneratorGitea) Reset() { *m = SCMProviderGeneratorGitea{} }
 func (*SCMProviderGeneratorGitea) ProtoMessage() {}
 func (*SCMProviderGeneratorGitea) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{135}
+	return fileDescriptor_030104ce3b95bcac, []int{136}
 }
 func (m *SCMProviderGeneratorGitea) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3852,7 +3880,7 @@ var xxx_messageInfo_SCMProviderGeneratorGitea proto.InternalMessageInfo
 func (m *SCMProviderGeneratorGithub) Reset() { *m = SCMProviderGeneratorGithub{} }
 func (*SCMProviderGeneratorGithub) ProtoMessage() {}
 func (*SCMProviderGeneratorGithub) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{136}
+	return fileDescriptor_030104ce3b95bcac, []int{137}
 }
 func (m *SCMProviderGeneratorGithub) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3880,7 +3908,7 @@ var xxx_messageInfo_SCMProviderGeneratorGithub proto.InternalMessageInfo
 func (m *SCMProviderGeneratorGitlab) Reset() { *m = SCMProviderGeneratorGitlab{} }
 func (*SCMProviderGeneratorGitlab) ProtoMessage() {}
 func (*SCMProviderGeneratorGitlab) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{137}
+	return fileDescriptor_030104ce3b95bcac, []int{138}
 }
 func (m *SCMProviderGeneratorGitlab) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3908,7 +3936,7 @@ var xxx_messageInfo_SCMProviderGeneratorGitlab proto.InternalMessageInfo
 func (m *SecretRef) Reset() { *m = SecretRef{} }
 func (*SecretRef) ProtoMessage() {}
 func (*SecretRef) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{138}
+	return fileDescriptor_030104ce3b95bcac, []int{139}
 }
 func (m *SecretRef) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3936,7 +3964,7 @@ var xxx_messageInfo_SecretRef proto.InternalMessageInfo
 func (m *SignatureKey) Reset() { *m = SignatureKey{} }
 func (*SignatureKey) ProtoMessage() {}
 func (*SignatureKey) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{139}
+	return fileDescriptor_030104ce3b95bcac, []int{140}
 }
 func (m *SignatureKey) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3964,7 +3992,7 @@ var xxx_messageInfo_SignatureKey proto.InternalMessageInfo
 func (m *SyncOperation) Reset() { *m = SyncOperation{} }
 func (*SyncOperation) ProtoMessage() {}
 func (*SyncOperation) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{140}
+	return fileDescriptor_030104ce3b95bcac, []int{141}
 }
 func (m *SyncOperation) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3992,7 +4020,7 @@ var xxx_messageInfo_SyncOperation proto.InternalMessageInfo
 func (m *SyncOperationResource) Reset() { *m = SyncOperationResource{} }
 func (*SyncOperationResource) ProtoMessage() {}
 func (*SyncOperationResource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{141}
+	return fileDescriptor_030104ce3b95bcac, []int{142}
 }
 func (m *SyncOperationResource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4020,7 +4048,7 @@ var xxx_messageInfo_SyncOperationResource proto.InternalMessageInfo
 func (m *SyncOperationResult) Reset() { *m = SyncOperationResult{} }
 func (*SyncOperationResult) ProtoMessage() {}
 func (*SyncOperationResult) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{142}
+	return fileDescriptor_030104ce3b95bcac, []int{143}
 }
 func (m *SyncOperationResult) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4048,7 +4076,7 @@ var xxx_messageInfo_SyncOperationResult proto.InternalMessageInfo
 func (m *SyncPolicy) Reset() { *m = SyncPolicy{} }
 func (*SyncPolicy) ProtoMessage() {}
 func (*SyncPolicy) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{143}
+	return fileDescriptor_030104ce3b95bcac, []int{144}
 }
 func (m *SyncPolicy) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4076,7 +4104,7 @@ var xxx_messageInfo_SyncPolicy proto.InternalMessageInfo
 func (m *SyncPolicyAutomated) Reset() { *m = SyncPolicyAutomated{} }
 func (*SyncPolicyAutomated) ProtoMessage() {}
 func (*SyncPolicyAutomated) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{144}
+	return fileDescriptor_030104ce3b95bcac, []int{145}
 }
 func (m *SyncPolicyAutomated) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4104,7 +4132,7 @@ var xxx_messageInfo_SyncPolicyAutomated proto.InternalMessageInfo
 func (m *SyncStatus) Reset() { *m = SyncStatus{} }
 func (*SyncStatus) ProtoMessage() {}
 func (*SyncStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{145}
+	return fileDescriptor_030104ce3b95bcac, []int{146}
 }
 func (m *SyncStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4132,7 +4160,7 @@ var xxx_messageInfo_SyncStatus proto.InternalMessageInfo
 func (m *SyncStrategy) Reset() { *m = SyncStrategy{} }
 func (*SyncStrategy) ProtoMessage() {}
 func (*SyncStrategy) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{146}
+	return fileDescriptor_030104ce3b95bcac, []int{147}
 }
 func (m *SyncStrategy) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4160,7 +4188,7 @@ var xxx_messageInfo_SyncStrategy proto.InternalMessageInfo
 func (m *SyncStrategyApply) Reset() { *m = SyncStrategyApply{} }
 func (*SyncStrategyApply) ProtoMessage() {}
 func (*SyncStrategyApply) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{147}
+	return fileDescriptor_030104ce3b95bcac, []int{148}
 }
 func (m *SyncStrategyApply) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4188,7 +4216,7 @@ var xxx_messageInfo_SyncStrategyApply proto.InternalMessageInfo
 func (m *SyncStrategyHook) Reset() { *m = SyncStrategyHook{} }
 func (*SyncStrategyHook) ProtoMessage() {}
 func (*SyncStrategyHook) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{148}
+	return fileDescriptor_030104ce3b95bcac, []int{149}
 }
 func (m *SyncStrategyHook) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4216,7 +4244,7 @@ var xxx_messageInfo_SyncStrategyHook proto.InternalMessageInfo
 func (m *SyncWindow) Reset() { *m = SyncWindow{} }
 func (*SyncWindow) ProtoMessage() {}
 func (*SyncWindow) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{149}
+	return fileDescriptor_030104ce3b95bcac, []int{150}
 }
 func (m *SyncWindow) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4244,7 +4272,7 @@ var xxx_messageInfo_SyncWindow proto.InternalMessageInfo
 func (m *TLSClientConfig) Reset() { *m = TLSClientConfig{} }
 func (*TLSClientConfig) ProtoMessage() {}
 func (*TLSClientConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{150}
+	return fileDescriptor_030104ce3b95bcac, []int{151}
 }
 func (m *TLSClientConfig) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4272,7 +4300,7 @@ var xxx_messageInfo_TLSClientConfig proto.InternalMessageInfo
 func (m *TagFilter) Reset() { *m = TagFilter{} }
 func (*TagFilter) ProtoMessage() {}
 func (*TagFilter) Descriptor() ([]byte, []int) {
-	return fileDescriptor_030104ce3b95bcac, []int{151}
+	return fileDescriptor_030104ce3b95bcac, []int{152}
 }
 func (m *TagFilter) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4328,6 +4356,7 @@ func init() {
 	proto.RegisterMapType((map[string]string)(nil), "github.zerozr99.workers.dev.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetTemplateMeta.AnnotationsEntry")
 	proto.RegisterMapType((map[string]string)(nil), "github.zerozr99.workers.dev.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetTemplateMeta.LabelsEntry")
 	proto.RegisterType((*ApplicationSetTerminalGenerator)(nil), "github.zerozr99.workers.dev.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetTerminalGenerator")
+	proto.RegisterType((*ApplicationSetTree)(nil), "github.zerozr99.workers.dev.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetTree")
 	proto.RegisterType((*ApplicationSource)(nil), "github.zerozr99.workers.dev.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSource")
 	proto.RegisterType((*ApplicationSourceDirectory)(nil), "github.zerozr99.workers.dev.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSourceDirectory")
 	proto.RegisterType((*ApplicationSourceHelm)(nil), "github.zerozr99.workers.dev.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSourceHelm")
@@ -4477,693 +4506,702 @@ func init() {
 }
 
 var fileDescriptor_030104ce3b95bcac = []byte{
-	// 10965 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x70, 0x1c, 0xc9,
-	0x75, 0x98, 0x66, 0x3f, 0x80, 0xdd, 0x07, 0x10, 0x24, 0x9b, 0xe4, 0x1d, 0x48, 0xdd, 0x1d, 0xe8,
-	0x39, 0xfb, 0x74, 0x8e, 0xee, 0x00, 0x1f, 0x7d, 0xa7, 0x5c, 0x74, 0xb6, 0x64, 0x7c, 0x90, 0x20,
-	0x48, 0x80, 0xc0, 0x35, 0x40, 0x52, 0x3a, 0xf9, 0x74, 0x1a, 0xec, 0x36, 0x16, 0x43, 0xcc, 0xce,
-	0xcc, 0xcd, 0xcc, 0x82, 0xc0, 0x59, 0x92, 0x25, 0x4b, 0xb6, 0x95, 0xe8, 0xe3, 0x14, 0x29, 0x29,
-	0x9f, 0x93, 0xc8, 0x91, 0x2d, 0x27, 0x15, 0x57, 0xa2, 0x8a, 0x93, 0xfc, 0x88, 0x13, 0x27, 0xe5,
-	0xb2, 0x9d, 0x4a, 0x29, 0xa5, 0xa4, 0xec, 0x4a, 0xb9, 0x2c, 0x27, 0xb1, 0x11, 0x09, 0x29, 0x57,
-	0x52, 0xa9, 0x8a, 0xab, 0x9c, 0xf8, 0x87, 0xc3, 0xe4, 0x47, 0xaa, 0xbf, 0x7b, 0x66, 0x67, 0x81,
-	0x05, 0x30, 0x20, 0x29, 0xe5, 0xfe, 0xed, 0xf6, 0x7b, 0xf3, 0x5e, 0x4f, 0x4f, 0xf7, 0x7b, 0xaf,
-	0x5f, 0xbf, 0xf7, 0x1a, 0xe6, 0x5b, 0x6e, 0xb2, 0xde, 0x59, 0x1d, 0x6f, 0x04, 0xed, 0x09, 0x27,
-	0x6a, 0x05, 0x61, 0x14, 0xdc, 0x61, 0x3f, 0x9e, 0x6d, 0x34, 0x27, 0x36, 0x2f, 0x4d, 0x84, 0x1b,
-	0xad, 0x09, 0x27, 0x74, 0xe3, 0x09, 0x27, 0x0c, 0x3d, 0xb7, 0xe1, 0x24, 0x6e, 0xe0, 0x4f, 0x6c,
-	0x3e, 0xe7, 0x78, 0xe1, 0xba, 0xf3, 0xdc, 0x44, 0x8b, 0xf8, 0x24, 0x72, 0x12, 0xd2, 0x1c, 0x0f,
-	0xa3, 0x20, 0x09, 0xd0, 0x8f, 0x68, 0x6a, 0xe3, 0x92, 0x1a, 0xfb, 0xf1, 0x5a, 0xa3, 0x39, 0xbe,
-	0x79, 0x69, 0x3c, 0xdc, 0x68, 0x8d, 0x53, 0x6a, 0xe3, 0x06, 0xb5, 0x71, 0x49, 0xed, 0xc2, 0xb3,
-	0x46, 0x5f, 0x5a, 0x41, 0x2b, 0x98, 0x60, 0x44, 0x57, 0x3b, 0x6b, 0xec, 0x1f, 0xfb, 0xc3, 0x7e,
-	0x71, 0x66, 0x17, 0xec, 0x8d, 0x17, 0xe3, 0x71, 0x37, 0xa0, 0xdd, 0x9b, 0x68, 0x04, 0x11, 0x99,
-	0xd8, 0xec, 0xea, 0xd0, 0x85, 0xab, 0x1a, 0x87, 0x6c, 0x25, 0xc4, 0x8f, 0xdd, 0xc0, 0x8f, 0x9f,
0x0f, 0xf7, 0x7a, 0xae, 0x93, 0xb8, 0xde, 0x84, 0xeb, 0x27, 0x71, 0x12, - 0x65, 0x1f, 0xb2, 0x5f, 0x87, 0x13, 0x93, 0xb7, 0x97, 0x27, 0x3b, 0xc9, 0xfa, 0x74, 0xe0, 0xaf, - 0xb9, 0x2d, 0xf4, 0x02, 0x0c, 0x35, 0xbc, 0x4e, 0x9c, 0x90, 0xe8, 0x86, 0xd3, 0x26, 0xa3, 0xd6, - 0x45, 0xeb, 0xe9, 0xfa, 0xd4, 0x99, 0x6f, 0xec, 0x8c, 0xbd, 0x63, 0x77, 0x67, 0x6c, 0x68, 0x5a, - 0x83, 0xb0, 0x89, 0x87, 0x7e, 0x10, 0x06, 0xa3, 0xc0, 0x23, 0x93, 0xf8, 0xc6, 0x68, 0x89, 0x3d, - 0x72, 0x52, 0x3c, 0x32, 0x88, 0x79, 0x33, 0x96, 0x70, 0xfb, 0xf7, 0x4b, 0x00, 0x93, 0x61, 0xb8, - 0x14, 0x05, 0x77, 0x48, 0x23, 0x41, 0x1f, 0x81, 0x1a, 0x1d, 0xba, 0xa6, 0x93, 0x38, 0x8c, 0xdb, - 0xd0, 0xa5, 0x1f, 0x1a, 0xe7, 0x6f, 0x32, 0x6e, 0xbe, 0x89, 0x9e, 0x38, 0x14, 0x7b, 0x7c, 0xf3, - 0xb9, 0xf1, 0xc5, 0x55, 0xfa, 0xfc, 0x02, 0x49, 0x9c, 0x29, 0x24, 0x98, 0x81, 0x6e, 0xc3, 0x8a, - 0x2a, 0xf2, 0xa1, 0x12, 0x87, 0xa4, 0xc1, 0x3a, 0x36, 0x74, 0x69, 0x7e, 0xfc, 0x28, 0x33, 0x74, - 0x5c, 0xf7, 0x7c, 0x39, 0x24, 0x8d, 0xa9, 0x61, 0xc1, 0xb9, 0x42, 0xff, 0x61, 0xc6, 0x07, 0x6d, - 0xc2, 0x40, 0x9c, 0x38, 0x49, 0x27, 0x1e, 0x2d, 0x33, 0x8e, 0x37, 0x0a, 0xe3, 0xc8, 0xa8, 0x4e, - 0x8d, 0x08, 0x9e, 0x03, 0xfc, 0x3f, 0x16, 0xdc, 0xec, 0x3f, 0xb2, 0x60, 0x44, 0x23, 0xcf, 0xbb, - 0x71, 0x82, 0x7e, 0xbc, 0x6b, 0x70, 0xc7, 0xfb, 0x1b, 0x5c, 0xfa, 0x34, 0x1b, 0xda, 0x53, 0x82, - 0x59, 0x4d, 0xb6, 0x18, 0x03, 0xdb, 0x86, 0xaa, 0x9b, 0x90, 0x76, 0x3c, 0x5a, 0xba, 0x58, 0x7e, - 0x7a, 0xe8, 0xd2, 0xd5, 0xa2, 0xde, 0x73, 0xea, 0x84, 0x60, 0x5a, 0x9d, 0xa3, 0xe4, 0x31, 0xe7, - 0x62, 0xff, 0xca, 0xb0, 0xf9, 0x7e, 0x74, 0xc0, 0xd1, 0x73, 0x30, 0x14, 0x07, 0x9d, 0xa8, 0x41, - 0x30, 0x09, 0x83, 0x78, 0xd4, 0xba, 0x58, 0xa6, 0x53, 0x8f, 0xce, 0xd4, 0x65, 0xdd, 0x8c, 0x4d, - 0x1c, 0xf4, 0x05, 0x0b, 0x86, 0x9b, 0x24, 0x4e, 0x5c, 0x9f, 0xf1, 0x97, 0x9d, 0x5f, 0x39, 0x72, - 0xe7, 0x65, 0xe3, 0x8c, 0x26, 0x3e, 0x75, 0x56, 0xbc, 0xc8, 0xb0, 0xd1, 0x18, 0xe3, 0x14, 0x7f, - 0xba, 0xe2, 0x9a, 0x24, 0x6e, 0x44, 0x6e, 0x48, 0xff, 0xb3, 0x39, 0x63, 0xac, 0xb8, 0x19, 0x0d, - 0xc2, 0x26, 0x1e, 0xf2, 0xa1, 0x4a, 0x57, 0x54, 0x3c, 0x5a, 0x61, 0xfd, 0x9f, 0x3b, 0x5a, 0xff, - 0xc5, 0xa0, 0xd2, 0xc5, 0xaa, 0x47, 0x9f, 0xfe, 0x8b, 0x31, 0x67, 0x83, 0x3e, 0x6f, 0xc1, 0xa8, - 0x58, 0xf1, 0x98, 0xf0, 0x01, 0xbd, 0xbd, 0xee, 0x26, 0xc4, 0x73, 0xe3, 0x64, 0xb4, 0xca, 0xfa, - 0x30, 0xd1, 0xdf, 0xdc, 0x9a, 0x8d, 0x82, 0x4e, 0x78, 0xdd, 0xf5, 0x9b, 0x53, 0x17, 0x05, 0xa7, - 0xd1, 0xe9, 0x1e, 0x84, 0x71, 0x4f, 0x96, 0xe8, 0xcb, 0x16, 0x5c, 0xf0, 0x9d, 0x36, 0x89, 0x43, - 0x87, 0x7e, 0x5a, 0x0e, 0x9e, 0xf2, 0x9c, 0xc6, 0x06, 0xeb, 0xd1, 0xc0, 0xe1, 0x7a, 0x64, 0x8b, - 0x1e, 0x5d, 0xb8, 0xd1, 0x93, 0x34, 0xde, 0x83, 0x2d, 0xfa, 0x9a, 0x05, 0xa7, 0x83, 0x28, 0x5c, - 0x77, 0x7c, 0xd2, 0x94, 0xd0, 0x78, 0x74, 0x90, 0x2d, 0xbd, 0x0f, 0x1f, 0xed, 0x13, 0x2d, 0x66, - 0xc9, 0x2e, 0x04, 0xbe, 0x9b, 0x04, 0xd1, 0x32, 0x49, 0x12, 0xd7, 0x6f, 0xc5, 0x53, 0xe7, 0x76, - 0x77, 0xc6, 0x4e, 0x77, 0x61, 0xe1, 0xee, 0xfe, 0xa0, 0x9f, 0x80, 0xa1, 0x78, 0xdb, 0x6f, 0xdc, - 0x76, 0xfd, 0x66, 0x70, 0x37, 0x1e, 0xad, 0x15, 0xb1, 0x7c, 0x97, 0x15, 0x41, 0xb1, 0x00, 0x35, - 0x03, 0x6c, 0x72, 0xcb, 0xff, 0x70, 0x7a, 0x2a, 0xd5, 0x8b, 0xfe, 0x70, 0x7a, 0x32, 0xed, 0xc1, - 0x16, 0xfd, 0xac, 0x05, 0x27, 0x62, 0xb7, 0xe5, 0x3b, 0x49, 0x27, 0x22, 0xd7, 0xc9, 0x76, 0x3c, - 0x0a, 0xac, 0x23, 0xd7, 0x8e, 0x38, 0x2a, 0x06, 0xc9, 0xa9, 0x73, 0xa2, 0x8f, 0x27, 0xcc, 0xd6, - 0x18, 0xa7, 0xf9, 0xe6, 0x2d, 0x34, 0x3d, 0xad, 0x87, 0x8a, 0x5d, 0x68, 0x7a, 0x52, 0xf7, 0x64, - 0x89, 0x7e, 0x0c, 0x4e, 0xf1, 0x26, 0x35, 0xb2, 
0xf1, 0xe8, 0x30, 0x13, 0xb4, 0x67, 0x77, 0x77, - 0xc6, 0x4e, 0x2d, 0x67, 0x60, 0xb8, 0x0b, 0x1b, 0xbd, 0x0e, 0x63, 0x21, 0x89, 0xda, 0x6e, 0xb2, - 0xe8, 0x7b, 0xdb, 0x52, 0x7c, 0x37, 0x82, 0x90, 0x34, 0x45, 0x77, 0xe2, 0xd1, 0x13, 0x17, 0xad, - 0xa7, 0x6b, 0x53, 0xef, 0x12, 0xdd, 0x1c, 0x5b, 0xda, 0x1b, 0x1d, 0xef, 0x47, 0xcf, 0xfe, 0x37, - 0x25, 0x38, 0x95, 0x55, 0x9c, 0xe8, 0xef, 0x5a, 0x70, 0xf2, 0xce, 0xdd, 0x64, 0x25, 0xd8, 0x20, - 0x7e, 0x3c, 0xb5, 0x4d, 0xc5, 0x1b, 0x53, 0x19, 0x43, 0x97, 0x1a, 0xc5, 0xaa, 0xe8, 0xf1, 0x6b, - 0x69, 0x2e, 0x97, 0xfd, 0x24, 0xda, 0x9e, 0x7a, 0x54, 0xbc, 0xdd, 0xc9, 0x6b, 0xb7, 0x57, 0x4c, - 0x28, 0xce, 0x76, 0xea, 0xc2, 0x67, 0x2d, 0x38, 0x9b, 0x47, 0x02, 0x9d, 0x82, 0xf2, 0x06, 0xd9, - 0xe6, 0x56, 0x19, 0xa6, 0x3f, 0xd1, 0xab, 0x50, 0xdd, 0x74, 0xbc, 0x0e, 0x11, 0xd6, 0xcd, 0xec, - 0xd1, 0x5e, 0x44, 0xf5, 0x0c, 0x73, 0xaa, 0xef, 0x2d, 0xbd, 0x68, 0xd9, 0xbf, 0x53, 0x86, 0x21, - 0x43, 0xbf, 0xdd, 0x07, 0x8b, 0x2d, 0x48, 0x59, 0x6c, 0x0b, 0x85, 0xa9, 0xe6, 0x9e, 0x26, 0xdb, - 0xdd, 0x8c, 0xc9, 0xb6, 0x58, 0x1c, 0xcb, 0x3d, 0x6d, 0x36, 0x94, 0x40, 0x3d, 0x08, 0xa9, 0x45, - 0x4e, 0x55, 0x7f, 0xa5, 0x88, 0x4f, 0xb8, 0x28, 0xc9, 0x4d, 0x9d, 0xd8, 0xdd, 0x19, 0xab, 0xab, - 0xbf, 0x58, 0x33, 0xb2, 0xbf, 0x65, 0xc1, 0x59, 0xa3, 0x8f, 0xd3, 0x81, 0xdf, 0x74, 0xd9, 0xa7, - 0xbd, 0x08, 0x95, 0x64, 0x3b, 0x94, 0x66, 0xbf, 0x1a, 0xa9, 0x95, 0xed, 0x90, 0x60, 0x06, 0xa1, - 0x86, 0x7e, 0x9b, 0xc4, 0xb1, 0xd3, 0x22, 0x59, 0x43, 0x7f, 0x81, 0x37, 0x63, 0x09, 0x47, 0x11, - 0x20, 0xcf, 0x89, 0x93, 0x95, 0xc8, 0xf1, 0x63, 0x46, 0x7e, 0xc5, 0x6d, 0x13, 0x31, 0xc0, 0x7f, - 0xa1, 0xbf, 0x19, 0x43, 0x9f, 0x98, 0x7a, 0x64, 0x77, 0x67, 0x0c, 0xcd, 0x77, 0x51, 0xc2, 0x39, - 0xd4, 0xed, 0x2f, 0x5b, 0xf0, 0x48, 0xbe, 0x2d, 0x86, 0x9e, 0x82, 0x01, 0xbe, 0xe5, 0x13, 0x6f, - 0xa7, 0x3f, 0x09, 0x6b, 0xc5, 0x02, 0x8a, 0x26, 0xa0, 0xae, 0xf4, 0x84, 0x78, 0xc7, 0xd3, 0x02, - 0xb5, 0xae, 0x95, 0x8b, 0xc6, 0xa1, 0x83, 0x46, 0xff, 0x08, 0xcb, 0x4d, 0x0d, 0x1a, 0xdb, 0x24, - 0x31, 0x88, 0xfd, 0x9f, 0x2d, 0x38, 0x69, 0xf4, 0xea, 0x3e, 0x98, 0xe6, 0x7e, 0xda, 0x34, 0x9f, - 0x2b, 0x6c, 0x3e, 0xf7, 0xb0, 0xcd, 0x3f, 0x6f, 0xc1, 0x05, 0x03, 0x6b, 0xc1, 0x49, 0x1a, 0xeb, - 0x97, 0xb7, 0xc2, 0x88, 0xc4, 0x74, 0x3b, 0x8d, 0x1e, 0x37, 0xe4, 0xd6, 0xd4, 0x90, 0xa0, 0x50, - 0xbe, 0x4e, 0xb6, 0xb9, 0x10, 0x7b, 0x06, 0x6a, 0x7c, 0x72, 0x06, 0x91, 0x18, 0x71, 0xf5, 0x6e, - 0x8b, 0xa2, 0x1d, 0x2b, 0x0c, 0x64, 0xc3, 0x00, 0x13, 0x4e, 0x74, 0xb1, 0x52, 0x35, 0x04, 0xf4, - 0x23, 0xde, 0x62, 0x2d, 0x58, 0x40, 0xec, 0x38, 0xd5, 0x9d, 0xa5, 0x88, 0xb0, 0x8f, 0xdb, 0xbc, - 0xe2, 0x12, 0xaf, 0x19, 0xd3, 0x6d, 0x83, 0xe3, 0xfb, 0x41, 0x22, 0x76, 0x00, 0xc6, 0xb6, 0x61, - 0x52, 0x37, 0x63, 0x13, 0x87, 0x32, 0xf5, 0x9c, 0x55, 0xe2, 0xf1, 0x11, 0x15, 0x4c, 0xe7, 0x59, - 0x0b, 0x16, 0x10, 0x7b, 0xb7, 0xc4, 0x36, 0x28, 0x6a, 0xe9, 0x93, 0xfb, 0xb1, 0xbb, 0x8d, 0x52, - 0xb2, 0x72, 0xa9, 0x38, 0xc1, 0x45, 0x7a, 0xef, 0x70, 0xdf, 0xc8, 0x88, 0x4b, 0x5c, 0x28, 0xd7, - 0xbd, 0x77, 0xb9, 0xbf, 0x59, 0x82, 0xb1, 0xf4, 0x03, 0x5d, 0xd2, 0x96, 0x6e, 0xa9, 0x0c, 0x46, - 0x59, 0x27, 0x86, 0x81, 0x8f, 0x4d, 0xbc, 0x1e, 0x02, 0xab, 0x74, 0x9c, 0x02, 0xcb, 0x94, 0xa7, - 0xe5, 0x7d, 0xe4, 0xe9, 0x53, 0x6a, 0xd4, 0x2b, 0x19, 0x01, 0x96, 0xd6, 0x29, 0x17, 0xa1, 0x12, - 0x27, 0x24, 0x1c, 0xad, 0xa6, 0xe5, 0xd1, 0x72, 0x42, 0x42, 0xcc, 0x20, 0xf6, 0x7f, 0x2f, 0xc1, - 0xa3, 0xe9, 0x31, 0xd4, 0x2a, 0xe0, 0xfd, 0x29, 0x15, 0xf0, 0x6e, 0x53, 0x05, 0xdc, 0xdb, 0x19, - 0x7b, 0x67, 0x8f, 0xc7, 0xbe, 0x6b, 0x34, 0x04, 0x9a, 0xcd, 0x8c, 0xe2, 
0x44, 0x7a, 0x14, 0xef, - 0xed, 0x8c, 0x3d, 0xde, 0xe3, 0x1d, 0x33, 0xc3, 0xfc, 0x14, 0x0c, 0x44, 0xc4, 0x89, 0x03, 0x5f, - 0x0c, 0xb4, 0xfa, 0x1c, 0x98, 0xb5, 0x62, 0x01, 0xb5, 0xff, 0x7d, 0x3d, 0x3b, 0xd8, 0xb3, 0xdc, - 0x09, 0x17, 0x44, 0xc8, 0x85, 0x0a, 0x33, 0xeb, 0xb9, 0x68, 0xb8, 0x7e, 0xb4, 0x65, 0x44, 0xd5, - 0x80, 0x22, 0x3d, 0x55, 0xa3, 0x5f, 0x8d, 0x36, 0x61, 0xc6, 0x02, 0x6d, 0x41, 0xad, 0x21, 0xad, - 0xed, 0x52, 0x11, 0x7e, 0x29, 0x61, 0x6b, 0x6b, 0x8e, 0xc3, 0x54, 0x5e, 0x2b, 0x13, 0x5d, 0x71, - 0x43, 0x04, 0xca, 0x2d, 0x37, 0x11, 0x9f, 0xf5, 0x88, 0xfb, 0xa9, 0x59, 0xd7, 0x78, 0xc5, 0x41, - 0xaa, 0x44, 0x66, 0xdd, 0x04, 0x53, 0xfa, 0xe8, 0xa7, 0x2d, 0x18, 0x8a, 0x1b, 0xed, 0xa5, 0x28, - 0xd8, 0x74, 0x9b, 0x24, 0x12, 0xd6, 0xd4, 0x11, 0x45, 0xd3, 0xf2, 0xf4, 0x82, 0x24, 0xa8, 0xf9, - 0xf2, 0xfd, 0xad, 0x86, 0x60, 0x93, 0x2f, 0xdd, 0x65, 0x3c, 0x2a, 0xde, 0x7d, 0x86, 0x34, 0x5c, - 0xaa, 0xff, 0xe4, 0xa6, 0x8a, 0xcd, 0x94, 0x23, 0x5b, 0x97, 0x33, 0x9d, 0xc6, 0x06, 0x5d, 0x6f, - 0xba, 0x43, 0xef, 0xdc, 0xdd, 0x19, 0x7b, 0x74, 0x3a, 0x9f, 0x27, 0xee, 0xd5, 0x19, 0x36, 0x60, - 0x61, 0xc7, 0xf3, 0x30, 0x79, 0xbd, 0x43, 0x98, 0xcb, 0xa4, 0x80, 0x01, 0x5b, 0xd2, 0x04, 0x33, - 0x03, 0x66, 0x40, 0xb0, 0xc9, 0x17, 0xbd, 0x0e, 0x03, 0x6d, 0x27, 0x89, 0xdc, 0x2d, 0xe1, 0x27, - 0x39, 0xa2, 0xbd, 0xbf, 0xc0, 0x68, 0x69, 0xe6, 0x4c, 0x53, 0xf3, 0x46, 0x2c, 0x18, 0xa1, 0x36, - 0x54, 0xdb, 0x24, 0x6a, 0x91, 0xd1, 0x5a, 0x11, 0x3e, 0xe1, 0x05, 0x4a, 0x4a, 0x33, 0xac, 0x53, - 0xeb, 0x88, 0xb5, 0x61, 0xce, 0x05, 0xbd, 0x0a, 0xb5, 0x98, 0x78, 0xa4, 0x41, 0xed, 0x9b, 0x3a, - 0xe3, 0xf8, 0xc3, 0x7d, 0xda, 0x7a, 0xd4, 0xb0, 0x58, 0x16, 0x8f, 0xf2, 0x05, 0x26, 0xff, 0x61, - 0x45, 0x92, 0x0e, 0x60, 0xe8, 0x75, 0x5a, 0xae, 0x3f, 0x0a, 0x45, 0x0c, 0xe0, 0x12, 0xa3, 0x95, - 0x19, 0x40, 0xde, 0x88, 0x05, 0x23, 0xfb, 0x8f, 0x2d, 0x40, 0x69, 0xa1, 0x76, 0x1f, 0x8c, 0xda, - 0xd7, 0xd3, 0x46, 0xed, 0x7c, 0x91, 0x56, 0x47, 0x0f, 0xbb, 0xf6, 0xd7, 0xeb, 0x90, 0x51, 0x07, - 0x37, 0x48, 0x9c, 0x90, 0xe6, 0xdb, 0x22, 0xfc, 0x6d, 0x11, 0xfe, 0xb6, 0x08, 0x57, 0x22, 0x7c, - 0x35, 0x23, 0xc2, 0xdf, 0x67, 0xac, 0x7a, 0x7d, 0xa8, 0xfa, 0x9a, 0x3a, 0x75, 0x35, 0x7b, 0x60, - 0x20, 0x50, 0x49, 0x70, 0x6d, 0x79, 0xf1, 0x46, 0xae, 0xcc, 0x7e, 0x2d, 0x2d, 0xb3, 0x8f, 0xca, - 0xe2, 0xff, 0x07, 0x29, 0xfd, 0xaf, 0x2d, 0x78, 0x57, 0x5a, 0x7a, 0xc9, 0x99, 0x33, 0xd7, 0xf2, - 0x83, 0x88, 0xcc, 0xb8, 0x6b, 0x6b, 0x24, 0x22, 0x7e, 0x83, 0xc4, 0xca, 0x8b, 0x61, 0xf5, 0xf2, - 0x62, 0xa0, 0xe7, 0x61, 0xf8, 0x4e, 0x1c, 0xf8, 0x4b, 0x81, 0xeb, 0x0b, 0x11, 0x44, 0x37, 0xc2, - 0xa7, 0x76, 0x77, 0xc6, 0x86, 0xe9, 0x88, 0xca, 0x76, 0x9c, 0xc2, 0x42, 0xd3, 0x70, 0xfa, 0xce, - 0xeb, 0x4b, 0x4e, 0x62, 0xb8, 0x03, 0xe4, 0xc6, 0x9d, 0x1d, 0x58, 0x5c, 0x7b, 0x39, 0x03, 0xc4, - 0xdd, 0xf8, 0xf6, 0xdf, 0x2c, 0xc1, 0xf9, 0xcc, 0x8b, 0x04, 0x9e, 0x17, 0x74, 0x12, 0xba, 0xa9, - 0x41, 0xbf, 0x60, 0xc1, 0xa9, 0x76, 0xda, 0xe3, 0x10, 0x0b, 0xc7, 0xee, 0x07, 0x0a, 0xd3, 0x11, - 0x19, 0x97, 0xc6, 0xd4, 0xa8, 0x18, 0xa1, 0x53, 0x19, 0x40, 0x8c, 0xbb, 0xfa, 0x82, 0x5e, 0x85, - 0x7a, 0xdb, 0xd9, 0xba, 0x19, 0x36, 0x9d, 0x44, 0xee, 0x27, 0x7b, 0xbb, 0x01, 0x3a, 0x89, 0xeb, - 0x8d, 0xf3, 0xe3, 0xfa, 0xf1, 0x39, 0x3f, 0x59, 0x8c, 0x96, 0x93, 0xc8, 0xf5, 0x5b, 0xdc, 0x9d, - 0xb7, 0x20, 0xc9, 0x60, 0x4d, 0xd1, 0xfe, 0x8a, 0x95, 0x55, 0x52, 0x6a, 0x74, 0x22, 0x27, 0x21, - 0xad, 0x6d, 0xf4, 0x51, 0xa8, 0xd2, 0x8d, 0x9f, 0x1c, 0x95, 0xdb, 0x45, 0x6a, 0x4e, 0xe3, 0x4b, - 0x68, 0x25, 0x4a, 0xff, 0xc5, 0x98, 0x33, 0xb5, 0xff, 0xb8, 0x96, 0x35, 0x16, 0xd8, 0xe1, 0xed, - 
0x25, 0x80, 0x56, 0xb0, 0x42, 0xda, 0xa1, 0x47, 0x87, 0xc5, 0x62, 0x27, 0x00, 0xca, 0xd7, 0x31, - 0xab, 0x20, 0xd8, 0xc0, 0x42, 0x7f, 0xd9, 0x02, 0x68, 0xc9, 0x39, 0x2f, 0x0d, 0x81, 0x9b, 0x45, - 0xbe, 0x8e, 0x5e, 0x51, 0xba, 0x2f, 0x8a, 0x21, 0x36, 0x98, 0xa3, 0x9f, 0xb2, 0xa0, 0x96, 0xc8, - 0xee, 0x73, 0xd5, 0xb8, 0x52, 0x64, 0x4f, 0xe4, 0x4b, 0x6b, 0x9b, 0x48, 0x0d, 0x89, 0xe2, 0x8b, - 0x7e, 0xc6, 0x02, 0x88, 0xb7, 0xfd, 0xc6, 0x52, 0xe0, 0xb9, 0x8d, 0x6d, 0xa1, 0x31, 0x6f, 0x15, - 0xea, 0x8f, 0x51, 0xd4, 0xa7, 0x46, 0xe8, 0x68, 0xe8, 0xff, 0xd8, 0xe0, 0x8c, 0x3e, 0x0e, 0xb5, - 0x58, 0x4c, 0x37, 0xa1, 0x23, 0x57, 0x8a, 0xf5, 0x0a, 0x71, 0xda, 0x42, 0xbc, 0x8a, 0x7f, 0x58, - 0xf1, 0x44, 0x3f, 0x67, 0xc1, 0xc9, 0x30, 0xed, 0xe7, 0x13, 0xea, 0xb0, 0x38, 0x19, 0x90, 0xf1, - 0x23, 0x4e, 0x9d, 0xd9, 0xdd, 0x19, 0x3b, 0x99, 0x69, 0xc4, 0xd9, 0x5e, 0x50, 0x09, 0xa8, 0x67, - 0xf0, 0x62, 0xc8, 0x7d, 0x8e, 0x83, 0x5a, 0x02, 0xce, 0x66, 0x81, 0xb8, 0x1b, 0x1f, 0x2d, 0xc1, - 0x59, 0xda, 0xbb, 0x6d, 0x6e, 0x7e, 0x4a, 0xf5, 0x12, 0x33, 0x65, 0x58, 0x9b, 0x7a, 0x4c, 0xcc, - 0x10, 0xe6, 0xd5, 0xcf, 0xe2, 0xe0, 0xdc, 0x27, 0xd1, 0xef, 0x58, 0xf0, 0x98, 0xcb, 0xd4, 0x80, - 0xe9, 0x30, 0xd7, 0x1a, 0x41, 0x9c, 0xc4, 0x92, 0x42, 0x65, 0x45, 0x2f, 0xf5, 0x33, 0xf5, 0xfd, - 0xe2, 0x0d, 0x1e, 0x9b, 0xdb, 0xa3, 0x4b, 0x78, 0xcf, 0x0e, 0xdb, 0xdf, 0x2c, 0xa5, 0x8e, 0x35, - 0x94, 0x2f, 0x91, 0x49, 0x8d, 0x86, 0x74, 0xe3, 0x48, 0x21, 0x58, 0xa8, 0xd4, 0x50, 0x4e, 0x22, - 0x2d, 0x35, 0x54, 0x53, 0x8c, 0x0d, 0xe6, 0xd4, 0xb6, 0x3c, 0xed, 0x64, 0x3d, 0x96, 0x42, 0x90, - 0xbd, 0x5a, 0x64, 0x97, 0xba, 0x0f, 0xa1, 0xce, 0x8b, 0xae, 0x9d, 0xee, 0x02, 0xe1, 0xee, 0x2e, - 0xd9, 0xdf, 0x4c, 0x1f, 0xa5, 0x18, 0x6b, 0xb0, 0x8f, 0x63, 0xa2, 0x2f, 0x58, 0x30, 0x14, 0x05, - 0x9e, 0xe7, 0xfa, 0x2d, 0x2a, 0x2f, 0x84, 0xd2, 0xfb, 0xd0, 0xb1, 0xe8, 0x1d, 0x21, 0x18, 0x98, - 0x85, 0x8a, 0x35, 0x4f, 0x6c, 0x76, 0xc0, 0xfe, 0x23, 0x0b, 0x46, 0x7b, 0xc9, 0x35, 0x44, 0xe0, - 0x9d, 0x72, 0xd1, 0xaa, 0x20, 0x89, 0x45, 0x7f, 0x86, 0x78, 0x44, 0xf9, 0x8f, 0x6b, 0x53, 0x4f, - 0x8a, 0xd7, 0x7c, 0xe7, 0x52, 0x6f, 0x54, 0xbc, 0x17, 0x1d, 0xf4, 0x0a, 0x9c, 0x32, 0xde, 0x2b, - 0x56, 0x03, 0x53, 0x9f, 0x1a, 0xa7, 0x86, 0xc4, 0x64, 0x06, 0x76, 0x6f, 0x67, 0xec, 0x91, 0x6c, - 0x9b, 0x10, 0xbc, 0x5d, 0x74, 0xec, 0x5f, 0x2e, 0x65, 0xbf, 0x96, 0xd2, 0x99, 0x6f, 0x59, 0x5d, - 0xbb, 0xf2, 0x0f, 0x1c, 0x87, 0x9e, 0x62, 0xfb, 0x77, 0x15, 0x87, 0xd1, 0x1b, 0xe7, 0x01, 0x1e, - 0xf4, 0xda, 0xff, 0xb6, 0x02, 0x7b, 0xf4, 0xac, 0x0f, 0x23, 0xf8, 0xc0, 0xa7, 0x83, 0x9f, 0xb3, - 0xd4, 0xc9, 0x51, 0x99, 0x2d, 0xf2, 0xe6, 0x71, 0x8d, 0x3d, 0xdf, 0x87, 0xc4, 0x3c, 0xd8, 0x40, - 0x79, 0xa3, 0xd3, 0x67, 0x54, 0xe8, 0xab, 0x56, 0xfa, 0xec, 0x8b, 0x47, 0x8f, 0xb9, 0xc7, 0xd6, - 0x27, 0xe3, 0x40, 0x8d, 0x77, 0x4c, 0x1f, 0xc3, 0xf4, 0x3a, 0x6a, 0x1b, 0x07, 0x58, 0x73, 0x7d, - 0xc7, 0x73, 0xdf, 0xa0, 0xbb, 0x8c, 0x2a, 0x53, 0x94, 0xcc, 0xf2, 0xb8, 0xa2, 0x5a, 0xb1, 0x81, - 0x71, 0xe1, 0x2f, 0xc1, 0x90, 0xf1, 0xe6, 0x39, 0x31, 0x12, 0x67, 0xcd, 0x18, 0x89, 0xba, 0x11, - 0xda, 0x70, 0xe1, 0x7d, 0x70, 0x2a, 0xdb, 0xc1, 0x83, 0x3c, 0x6f, 0xff, 0xf9, 0x60, 0xf6, 0x30, - 0x6a, 0x85, 0x44, 0x6d, 0xda, 0xb5, 0xb7, 0x1d, 0x44, 0x6f, 0x3b, 0x88, 0xde, 0x76, 0x10, 0x99, - 0x3e, 0x7e, 0xe1, 0xfc, 0x18, 0xbc, 0x4f, 0xce, 0x8f, 0x94, 0x3b, 0xa7, 0x56, 0xb8, 0x3b, 0xc7, - 0xde, 0xad, 0x42, 0xca, 0x8e, 0xe2, 0xe3, 0xfd, 0x83, 0x30, 0x18, 0x91, 0x30, 0xb8, 0x89, 0xe7, - 0x85, 0x0e, 0xd1, 0x71, 0xf0, 0xbc, 0x19, 0x4b, 0x38, 0xd5, 0x35, 0xa1, 0x93, 0xac, 0x0b, 0x25, - 0xa2, 0x74, 0xcd, 0x92, 
0x93, 0xac, 0x63, 0x06, 0x41, 0xef, 0x83, 0x91, 0xc4, 0x89, 0x5a, 0xd4, - 0x6c, 0xde, 0x64, 0x9f, 0x55, 0x1c, 0x59, 0x3e, 0x22, 0x70, 0x47, 0x56, 0x52, 0x50, 0x9c, 0xc1, - 0x46, 0xaf, 0x43, 0x65, 0x9d, 0x78, 0x6d, 0x31, 0xe4, 0xcb, 0xc5, 0xc9, 0x78, 0xf6, 0xae, 0x57, - 0x89, 0xd7, 0xe6, 0x12, 0x88, 0xfe, 0xc2, 0x8c, 0x15, 0x9d, 0x6f, 0xf5, 0x8d, 0x4e, 0x9c, 0x04, - 0x6d, 0xf7, 0x0d, 0xe9, 0xa9, 0xfb, 0x40, 0xc1, 0x8c, 0xaf, 0x4b, 0xfa, 0xdc, 0x25, 0xa2, 0xfe, - 0x62, 0xcd, 0x99, 0xf5, 0xa3, 0xe9, 0x46, 0xec, 0x53, 0x6d, 0x0b, 0x87, 0x5b, 0xd1, 0xfd, 0x98, - 0x91, 0xf4, 0x79, 0x3f, 0xd4, 0x5f, 0xac, 0x39, 0xa3, 0x6d, 0x35, 0xef, 0x87, 0x58, 0x1f, 0x6e, - 0x16, 0xdc, 0x07, 0x3e, 0xe7, 0x73, 0xe7, 0xff, 0x93, 0x50, 0x6d, 0xac, 0x3b, 0x51, 0x32, 0x3a, - 0xcc, 0x26, 0x8d, 0x72, 0xcd, 0x4c, 0xd3, 0x46, 0xcc, 0x61, 0xe8, 0x71, 0x28, 0x47, 0x64, 0x8d, - 0x85, 0x5f, 0x1a, 0x81, 0x39, 0x98, 0xac, 0x61, 0xda, 0x6e, 0xff, 0x62, 0x29, 0x6d, 0x2e, 0xa5, - 0xdf, 0x9b, 0xcf, 0xf6, 0x46, 0x27, 0x8a, 0xa5, 0xfb, 0xc6, 0x98, 0xed, 0xac, 0x19, 0x4b, 0x38, - 0xfa, 0xa4, 0x05, 0x83, 0x77, 0xe2, 0xc0, 0xf7, 0x49, 0x22, 0x54, 0xd3, 0xad, 0x82, 0x87, 0xe2, - 0x1a, 0xa7, 0xae, 0xfb, 0x20, 0x1a, 0xb0, 0xe4, 0x4b, 0xbb, 0x4b, 0xb6, 0x1a, 0x5e, 0xa7, 0xd9, - 0x15, 0x6b, 0x71, 0x99, 0x37, 0x63, 0x09, 0xa7, 0xa8, 0xae, 0xcf, 0x51, 0x2b, 0x69, 0xd4, 0x39, - 0x5f, 0xa0, 0x0a, 0xb8, 0xfd, 0xd7, 0x07, 0xe0, 0x5c, 0xee, 0xe2, 0xa0, 0x86, 0x0c, 0x33, 0x15, - 0xae, 0xb8, 0x1e, 0x91, 0x51, 0x46, 0xcc, 0x90, 0xb9, 0xa5, 0x5a, 0xb1, 0x81, 0x81, 0x7e, 0x12, - 0x20, 0x74, 0x22, 0xa7, 0x4d, 0x94, 0x7b, 0xf5, 0xc8, 0xf6, 0x02, 0xed, 0xc7, 0x92, 0xa4, 0xa9, - 0xf7, 0xa6, 0xaa, 0x29, 0xc6, 0x06, 0x4b, 0xf4, 0x02, 0x0c, 0x45, 0xc4, 0x23, 0x4e, 0xcc, 0xa2, - 0x77, 0xb3, 0xa9, 0x08, 0x58, 0x83, 0xb0, 0x89, 0x87, 0x9e, 0x52, 0x01, 0x59, 0x99, 0xc0, 0x94, - 0x74, 0x50, 0x16, 0x7a, 0xd3, 0x82, 0x91, 0x35, 0xd7, 0x23, 0x9a, 0xbb, 0x48, 0x1c, 0x58, 0x3c, - 0xfa, 0x4b, 0x5e, 0x31, 0xe9, 0x6a, 0x09, 0x99, 0x6a, 0x8e, 0x71, 0x86, 0x3d, 0xfd, 0xcc, 0x9b, - 0x24, 0x62, 0xa2, 0x75, 0x20, 0xfd, 0x99, 0x6f, 0xf1, 0x66, 0x2c, 0xe1, 0x68, 0x12, 0x4e, 0x86, - 0x4e, 0x1c, 0x4f, 0x47, 0xa4, 0x49, 0xfc, 0xc4, 0x75, 0x3c, 0x1e, 0xd6, 0x5f, 0xd3, 0x61, 0xbd, - 0x4b, 0x69, 0x30, 0xce, 0xe2, 0xa3, 0x0f, 0xc2, 0xa3, 0xdc, 0x7f, 0xb1, 0xe0, 0xc6, 0xb1, 0xeb, - 0xb7, 0xf4, 0x34, 0x10, 0x6e, 0x9c, 0x31, 0x41, 0xea, 0xd1, 0xb9, 0x7c, 0x34, 0xdc, 0xeb, 0x79, - 0xf4, 0x0c, 0xd4, 0xe2, 0x0d, 0x37, 0x9c, 0x8e, 0x9a, 0x31, 0x3b, 0xbb, 0xa8, 0x69, 0xa7, 0xe1, - 0xb2, 0x68, 0xc7, 0x0a, 0x03, 0x35, 0x60, 0x98, 0x7f, 0x12, 0x1e, 0x51, 0x26, 0xe4, 0xe3, 0xb3, - 0x3d, 0xd5, 0xa3, 0xc8, 0x3c, 0x1b, 0xc7, 0xce, 0xdd, 0xcb, 0xf2, 0x24, 0x85, 0x3b, 0xfe, 0x6f, - 0x19, 0x64, 0x70, 0x8a, 0xa8, 0xfd, 0xf3, 0xa5, 0xf4, 0x8e, 0xdb, 0x5c, 0xa4, 0x28, 0xa6, 0x4b, - 0x31, 0xb9, 0xe5, 0x44, 0xd2, 0x1b, 0x73, 0xc4, 0xec, 0x03, 0x41, 0xf7, 0x96, 0x13, 0x99, 0x8b, - 0x9a, 0x31, 0xc0, 0x92, 0x13, 0xba, 0x03, 0x95, 0xc4, 0x73, 0x0a, 0x4a, 0x57, 0x32, 0x38, 0x6a, - 0x07, 0xc8, 0xfc, 0x64, 0x8c, 0x19, 0x0f, 0xf4, 0x18, 0xb5, 0xfa, 0x57, 0xe5, 0x49, 0x87, 0x30, - 0xd4, 0x57, 0x63, 0xcc, 0x5a, 0xed, 0x3f, 0xaf, 0xe7, 0xc8, 0x55, 0xa5, 0xc8, 0xd0, 0x25, 0x00, - 0xba, 0x81, 0x5c, 0x8a, 0xc8, 0x9a, 0xbb, 0x25, 0x0c, 0x09, 0xb5, 0x76, 0x6f, 0x28, 0x08, 0x36, - 0xb0, 0xe4, 0x33, 0xcb, 0x9d, 0x35, 0xfa, 0x4c, 0xa9, 0xfb, 0x19, 0x0e, 0xc1, 0x06, 0x16, 0x7a, - 0x1e, 0x06, 0xdc, 0xb6, 0xd3, 0x52, 0x91, 0x94, 0x8f, 0xd1, 0x45, 0x3b, 0xc7, 0x5a, 0xee, 0xed, - 0x8c, 0x8d, 0xa8, 0x0e, 0xb1, 0x26, 0x2c, 0x70, 
0xd1, 0x2f, 0x5b, 0x30, 0xdc, 0x08, 0xda, 0xed, - 0xc0, 0xe7, 0xdb, 0x2e, 0xb1, 0x87, 0xbc, 0x73, 0x5c, 0x6a, 0x7e, 0x7c, 0xda, 0x60, 0xc6, 0x37, - 0x91, 0x2a, 0xaf, 0xca, 0x04, 0xe1, 0x54, 0xaf, 0xcc, 0xb5, 0x5d, 0xdd, 0x67, 0x6d, 0xff, 0x9a, - 0x05, 0xa7, 0xf9, 0xb3, 0xc6, 0x6e, 0x50, 0xa4, 0x10, 0x05, 0xc7, 0xfc, 0x5a, 0x5d, 0x1b, 0x64, - 0xe5, 0xa5, 0xeb, 0x82, 0xe3, 0xee, 0x4e, 0xa2, 0x59, 0x38, 0xbd, 0x16, 0x44, 0x0d, 0x62, 0x0e, - 0x84, 0x10, 0x4c, 0x8a, 0xd0, 0x95, 0x2c, 0x02, 0xee, 0x7e, 0x06, 0xdd, 0x82, 0x47, 0x8c, 0x46, - 0x73, 0x1c, 0xb8, 0x6c, 0x7a, 0x42, 0x50, 0x7b, 0xe4, 0x4a, 0x2e, 0x16, 0xee, 0xf1, 0x74, 0xda, - 0x61, 0x52, 0xef, 0xc3, 0x61, 0xf2, 0x1a, 0x9c, 0x6f, 0x74, 0x8f, 0xcc, 0x66, 0xdc, 0x59, 0x8d, - 0xb9, 0xa4, 0xaa, 0x4d, 0x7d, 0x9f, 0x20, 0x70, 0x7e, 0xba, 0x17, 0x22, 0xee, 0x4d, 0x03, 0x7d, - 0x14, 0x6a, 0x11, 0x61, 0x5f, 0x25, 0x16, 0xf9, 0x34, 0x47, 0xdc, 0x25, 0x6b, 0x0b, 0x94, 0x93, - 0xd5, 0xb2, 0x57, 0x34, 0xc4, 0x58, 0x71, 0x44, 0x77, 0x61, 0x30, 0x74, 0x92, 0xc6, 0xba, 0xc8, - 0xa2, 0x39, 0x72, 0x18, 0x8b, 0x62, 0xbe, 0x44, 0xa9, 0xea, 0x49, 0xbe, 0xc4, 0x99, 0x60, 0xc9, - 0xed, 0xc2, 0xfb, 0xe1, 0x74, 0xd7, 0x42, 0x3a, 0x90, 0xb3, 0x64, 0x06, 0x1e, 0xc9, 0x9f, 0xb2, - 0x07, 0x72, 0x99, 0xfc, 0x93, 0x4c, 0xec, 0xa9, 0x61, 0xc6, 0xf6, 0xe1, 0x7e, 0x73, 0xa0, 0x4c, - 0xfc, 0x4d, 0x21, 0xc1, 0xaf, 0x1c, 0x6d, 0xe4, 0x2e, 0xfb, 0x9b, 0x7c, 0xc5, 0x31, 0x1f, 0xc3, - 0x65, 0x7f, 0x13, 0x53, 0xda, 0xe8, 0x4b, 0x56, 0xca, 0x0c, 0xe3, 0x4e, 0xbb, 0x0f, 0x1f, 0x8b, - 0xdd, 0xde, 0xb7, 0x65, 0x66, 0xff, 0xbb, 0x12, 0x5c, 0xdc, 0x8f, 0x48, 0x1f, 0xc3, 0xf7, 0x24, - 0x0c, 0xc4, 0xec, 0x34, 0x59, 0x88, 0xc4, 0x21, 0x3a, 0x53, 0xf8, 0xf9, 0xf2, 0x6b, 0x58, 0x80, - 0x90, 0x07, 0xe5, 0xb6, 0x13, 0x0a, 0x5f, 0xce, 0xdc, 0x51, 0xb3, 0x51, 0xe8, 0x7f, 0xc7, 0x5b, - 0x70, 0x42, 0xee, 0x21, 0x30, 0x1a, 0x30, 0x65, 0x83, 0x12, 0xa8, 0x3a, 0x51, 0xe4, 0xc8, 0xa3, - 0xcb, 0xeb, 0xc5, 0xf0, 0x9b, 0xa4, 0x24, 0xa7, 0x4e, 0xef, 0xee, 0x8c, 0x9d, 0x48, 0x35, 0x61, - 0xce, 0xcc, 0xfe, 0xdc, 0x60, 0x2a, 0x23, 0x83, 0x9d, 0x47, 0xc7, 0x30, 0x20, 0x5c, 0x38, 0x56, - 0xd1, 0x49, 0x40, 0x3c, 0xa5, 0x8e, 0xed, 0xd2, 0x44, 0x62, 0xb2, 0x60, 0x85, 0x3e, 0x6b, 0xb1, - 0xf4, 0x5f, 0x99, 0xa5, 0x22, 0xf6, 0x46, 0xc7, 0x93, 0x8d, 0x6c, 0x26, 0x15, 0xcb, 0x46, 0x6c, - 0x72, 0xa7, 0x3a, 0x33, 0xe4, 0x89, 0x6c, 0xd9, 0x1d, 0x92, 0x4c, 0x10, 0x96, 0x70, 0xb4, 0x95, - 0x73, 0xee, 0x5c, 0x40, 0x0a, 0x69, 0x1f, 0x27, 0xcd, 0x5f, 0xb5, 0xe0, 0xb4, 0x9b, 0x3d, 0x40, - 0x14, 0x3b, 0x89, 0x23, 0x46, 0x36, 0xf4, 0x3e, 0x9f, 0x54, 0xca, 0xb4, 0x0b, 0x84, 0xbb, 0x3b, - 0x83, 0x9a, 0x50, 0x71, 0xfd, 0xb5, 0x40, 0x98, 0x10, 0x53, 0x47, 0xeb, 0xd4, 0x9c, 0xbf, 0x16, - 0xe8, 0xd5, 0x4c, 0xff, 0x61, 0x46, 0x1d, 0xcd, 0xc3, 0xd9, 0x48, 0xf8, 0x7a, 0xae, 0xba, 0x31, - 0xdd, 0x91, 0xcf, 0xbb, 0x6d, 0x37, 0x61, 0xea, 0xbf, 0x3c, 0x35, 0xba, 0xbb, 0x33, 0x76, 0x16, - 0xe7, 0xc0, 0x71, 0xee, 0x53, 0xe8, 0x0d, 0x18, 0x94, 0xf9, 0xca, 0xb5, 0x22, 0x76, 0x65, 0xdd, - 0xf3, 0x5f, 0x4d, 0xa6, 0x65, 0x91, 0x9a, 0x2c, 0x19, 0xda, 0x6f, 0x0e, 0x41, 0xf7, 0xa1, 0x24, - 0xfa, 0x18, 0xd4, 0x23, 0x95, 0x43, 0x6d, 0x15, 0xa1, 0x2c, 0xe5, 0xf7, 0x15, 0x07, 0xa2, 0xca, - 0x10, 0xd1, 0xd9, 0xd2, 0x9a, 0x23, 0xdd, 0x2e, 0xc4, 0xfa, 0xec, 0xb2, 0x80, 0xb9, 0x2d, 0xb8, - 0xea, 0x73, 0xa9, 0x6d, 0xbf, 0x81, 0x19, 0x0f, 0x14, 0xc1, 0xc0, 0x3a, 0x71, 0xbc, 0x64, 0xbd, - 0x18, 0x17, 0xfa, 0x55, 0x46, 0x2b, 0x9b, 0x49, 0xc3, 0x5b, 0xb1, 0xe0, 0x84, 0xb6, 0x60, 0x70, - 0x9d, 0x4f, 0x00, 0x61, 0xc1, 0x2f, 0x1c, 0x75, 0x70, 0x53, 0xb3, 0x4a, 
0x7f, 0x6e, 0xd1, 0x80, - 0x25, 0x3b, 0x16, 0xb4, 0x62, 0x9c, 0xc7, 0xf3, 0xa5, 0x5b, 0x5c, 0x12, 0x51, 0xff, 0x87, 0xf1, - 0x1f, 0x81, 0xe1, 0x88, 0x34, 0x02, 0xbf, 0xe1, 0x7a, 0xa4, 0x39, 0x29, 0xdd, 0xe3, 0x07, 0x49, - 0x3d, 0x61, 0xbb, 0x60, 0x6c, 0xd0, 0xc0, 0x29, 0x8a, 0xe8, 0x33, 0x16, 0x8c, 0xa8, 0xc4, 0x4b, - 0xfa, 0x41, 0x88, 0x70, 0xc7, 0xce, 0x17, 0x94, 0xe6, 0xc9, 0x68, 0x4e, 0xa1, 0xdd, 0x9d, 0xb1, - 0x91, 0x74, 0x1b, 0xce, 0xf0, 0x45, 0xaf, 0x00, 0x04, 0xab, 0x3c, 0x32, 0x65, 0x32, 0x11, 0xbe, - 0xd9, 0x83, 0xbc, 0xea, 0x08, 0xcf, 0x41, 0x93, 0x14, 0xb0, 0x41, 0x0d, 0x5d, 0x07, 0xe0, 0xcb, - 0x66, 0x65, 0x3b, 0x94, 0x66, 0xbe, 0xcc, 0x1d, 0x82, 0x65, 0x05, 0xb9, 0xb7, 0x33, 0xd6, 0xed, - 0x2b, 0x63, 0x61, 0x03, 0xc6, 0xe3, 0xe8, 0x27, 0x60, 0x30, 0xee, 0xb4, 0xdb, 0x8e, 0xf2, 0xdc, - 0x16, 0x98, 0xd5, 0xc6, 0xe9, 0x1a, 0xa2, 0x88, 0x37, 0x60, 0xc9, 0x11, 0xdd, 0xa1, 0x42, 0x35, - 0x16, 0x4e, 0x3c, 0xb6, 0x8a, 0xb8, 0x4d, 0x30, 0xc4, 0xde, 0xe9, 0x3d, 0x32, 0xd0, 0x06, 0xe7, - 0xe0, 0xdc, 0xdb, 0x19, 0x7b, 0x24, 0xdd, 0x3e, 0x1f, 0x88, 0x3c, 0xb3, 0x5c, 0x9a, 0xe8, 0x9a, - 0x2c, 0x5f, 0x42, 0x5f, 0x5b, 0x66, 0xd5, 0x3f, 0xad, 0xcb, 0x97, 0xb0, 0xe6, 0xde, 0x63, 0x66, - 0x3e, 0x8c, 0x16, 0xe0, 0x4c, 0x23, 0xf0, 0x93, 0x28, 0xf0, 0x3c, 0x5e, 0x93, 0x87, 0xef, 0xb8, - 0xb8, 0x67, 0xf7, 0x9d, 0xa2, 0xdb, 0x67, 0xa6, 0xbb, 0x51, 0x70, 0xde, 0x73, 0xb6, 0x9f, 0x0e, - 0xd9, 0x13, 0x83, 0xf3, 0x3c, 0x0c, 0x93, 0xad, 0x84, 0x44, 0xbe, 0xe3, 0xdd, 0xc4, 0xf3, 0xd2, - 0xa7, 0xc9, 0xd6, 0xc0, 0x65, 0xa3, 0x1d, 0xa7, 0xb0, 0x90, 0xad, 0xdc, 0x0c, 0x46, 0xee, 0x24, - 0x77, 0x33, 0x48, 0xa7, 0x82, 0xfd, 0xbf, 0x4b, 0x29, 0x83, 0x6c, 0x25, 0x22, 0x04, 0x05, 0x50, - 0xf5, 0x83, 0xa6, 0x92, 0xfd, 0xd7, 0x8a, 0x91, 0xfd, 0x37, 0x82, 0xa6, 0x51, 0xe3, 0x84, 0xfe, - 0x8b, 0x31, 0xe7, 0xc3, 0x8a, 0x40, 0xc8, 0x6a, 0x19, 0x0c, 0x20, 0x36, 0x1a, 0x45, 0x72, 0x56, - 0x45, 0x20, 0x16, 0x4d, 0x46, 0x38, 0xcd, 0x17, 0x6d, 0x40, 0x75, 0x3d, 0x88, 0x13, 0xb9, 0xfd, - 0x38, 0xe2, 0x4e, 0xe7, 0x6a, 0x10, 0x27, 0xcc, 0x8a, 0x50, 0xaf, 0x4d, 0x5b, 0x62, 0xcc, 0x79, - 0xd8, 0xff, 0xd5, 0x4a, 0x79, 0xb0, 0x6f, 0xb3, 0xf0, 0xd5, 0x4d, 0xe2, 0xd3, 0x65, 0x6d, 0x06, - 0xfa, 0xfc, 0xc5, 0x4c, 0x32, 0xe0, 0xbb, 0x7a, 0x55, 0x9c, 0xba, 0x4b, 0x29, 0x8c, 0x33, 0x12, - 0x46, 0x4c, 0xd0, 0x27, 0xac, 0x74, 0x5a, 0x66, 0xa9, 0x88, 0x0d, 0x86, 0x99, 0x9a, 0xbc, 0x6f, - 0x86, 0xa7, 0xfd, 0x25, 0x0b, 0x06, 0xa7, 0x9c, 0xc6, 0x46, 0xb0, 0xb6, 0x86, 0x9e, 0x81, 0x5a, - 0xb3, 0x13, 0x99, 0x19, 0xa2, 0x6a, 0xdb, 0x3e, 0x23, 0xda, 0xb1, 0xc2, 0xa0, 0x73, 0x78, 0xcd, - 0x69, 0xc8, 0x04, 0xe5, 0x32, 0x9f, 0xc3, 0x57, 0x58, 0x0b, 0x16, 0x10, 0xf4, 0x02, 0x0c, 0xb5, - 0x9d, 0x2d, 0xf9, 0x70, 0xd6, 0x7d, 0xbe, 0xa0, 0x41, 0xd8, 0xc4, 0xb3, 0xff, 0x95, 0x05, 0xa3, - 0x53, 0x4e, 0xec, 0x36, 0x26, 0x3b, 0xc9, 0xfa, 0x94, 0x9b, 0xac, 0x76, 0x1a, 0x1b, 0x24, 0xe1, - 0x59, 0xe9, 0xb4, 0x97, 0x9d, 0x98, 0x2e, 0x25, 0xb5, 0xaf, 0x53, 0xbd, 0xbc, 0x29, 0xda, 0xb1, - 0xc2, 0x40, 0x6f, 0xc0, 0x50, 0xe8, 0xc4, 0xf1, 0xdd, 0x20, 0x6a, 0x62, 0xb2, 0x56, 0x4c, 0x4d, - 0x88, 0x65, 0xd2, 0x88, 0x48, 0x82, 0xc9, 0x9a, 0x38, 0xe2, 0xd5, 0xf4, 0xb1, 0xc9, 0xcc, 0xfe, - 0x82, 0x05, 0xe7, 0xa7, 0x88, 0x13, 0x91, 0x88, 0x95, 0x90, 0x50, 0x2f, 0x32, 0xed, 0x05, 0x9d, - 0x26, 0x7a, 0x1d, 0x6a, 0x09, 0x6d, 0xa6, 0xdd, 0xb2, 0x8a, 0xed, 0x16, 0x3b, 0xa1, 0x5d, 0x11, - 0xc4, 0xb1, 0x62, 0x63, 0xff, 0x0d, 0x0b, 0x86, 0xd9, 0x61, 0xd7, 0x0c, 0x49, 0x1c, 0xd7, 0xeb, - 0xaa, 0xb4, 0x64, 0xf5, 0x59, 0x69, 0xe9, 0x22, 0x54, 0xd6, 0x83, 0x36, 0xc9, 0x1e, 0xd4, 0x5e, - 
0x0d, 0xe8, 0xb6, 0x9a, 0x42, 0xd0, 0x73, 0xf4, 0xc3, 0xbb, 0x7e, 0xe2, 0xd0, 0x25, 0x20, 0x9d, - 0xa9, 0x27, 0xf9, 0x47, 0x57, 0xcd, 0xd8, 0xc4, 0xb1, 0x7f, 0xb3, 0x0e, 0x83, 0xe2, 0x34, 0xbf, - 0xef, 0xca, 0x04, 0x72, 0x7f, 0x5f, 0xea, 0xb9, 0xbf, 0x8f, 0x61, 0xa0, 0xc1, 0xea, 0xb8, 0x09, - 0x33, 0xf2, 0x7a, 0x21, 0xe1, 0x1f, 0xbc, 0x34, 0x9c, 0xee, 0x16, 0xff, 0x8f, 0x05, 0x2b, 0xf4, - 0x45, 0x0b, 0x4e, 0x36, 0x02, 0xdf, 0x27, 0x0d, 0x6d, 0xe3, 0x54, 0x8a, 0x38, 0xe5, 0x9f, 0x4e, - 0x13, 0xd5, 0x27, 0x2d, 0x19, 0x00, 0xce, 0xb2, 0x47, 0x2f, 0xc1, 0x09, 0x3e, 0x66, 0xb7, 0x52, - 0x1e, 0x60, 0x5d, 0x80, 0xc7, 0x04, 0xe2, 0x34, 0x2e, 0x1a, 0xe7, 0x9e, 0x74, 0x51, 0xea, 0x66, - 0x40, 0x1f, 0xdb, 0x19, 0x45, 0x6e, 0x0c, 0x0c, 0x14, 0x01, 0x8a, 0xc8, 0x5a, 0x44, 0xe2, 0x75, - 0x11, 0xed, 0xc0, 0xec, 0xab, 0xc1, 0xc3, 0x65, 0x31, 0xe3, 0x2e, 0x4a, 0x38, 0x87, 0x3a, 0xda, - 0x10, 0x1b, 0xcc, 0x5a, 0x11, 0x32, 0x54, 0x7c, 0xe6, 0x9e, 0xfb, 0xcc, 0x31, 0xa8, 0xc6, 0xeb, - 0x4e, 0xd4, 0x64, 0x76, 0x5d, 0x99, 0x67, 0xce, 0x2c, 0xd3, 0x06, 0xcc, 0xdb, 0xd1, 0x0c, 0x9c, - 0xca, 0x94, 0x0f, 0x8a, 0x85, 0xa7, 0x56, 0x65, 0x49, 0x64, 0x0a, 0x0f, 0xc5, 0xb8, 0xeb, 0x09, - 0xd3, 0xf9, 0x30, 0xb4, 0x8f, 0xf3, 0x61, 0x5b, 0xc5, 0xd4, 0x71, 0x1f, 0xea, 0xcb, 0x85, 0x0c, - 0x40, 0x5f, 0x01, 0x74, 0x9f, 0xcf, 0x04, 0xd0, 0x9d, 0x60, 0x1d, 0xb8, 0x55, 0x4c, 0x07, 0x0e, - 0x1e, 0x2d, 0xf7, 0x20, 0xa3, 0xdf, 0xfe, 0xcc, 0x02, 0xf9, 0x5d, 0xa7, 0x9d, 0xc6, 0x3a, 0xa1, - 0x53, 0x06, 0xbd, 0x0f, 0x46, 0xd4, 0x16, 0x7a, 0x3a, 0xe8, 0xf8, 0x3c, 0xf0, 0xad, 0xac, 0x8f, - 0x64, 0x71, 0x0a, 0x8a, 0x33, 0xd8, 0x68, 0x02, 0xea, 0x74, 0x9c, 0xf8, 0xa3, 0x5c, 0xd7, 0xaa, - 0x6d, 0xfa, 0xe4, 0xd2, 0x9c, 0x78, 0x4a, 0xe3, 0xa0, 0x00, 0x4e, 0x7b, 0x4e, 0x9c, 0xb0, 0x1e, - 0xd0, 0x1d, 0xf5, 0x21, 0x6b, 0x08, 0xb0, 0x50, 0xfc, 0xf9, 0x2c, 0x21, 0xdc, 0x4d, 0xdb, 0xfe, - 0x56, 0x05, 0x4e, 0xa4, 0x24, 0xe3, 0x01, 0x95, 0xf4, 0x33, 0x50, 0x93, 0x7a, 0x33, 0x5b, 0xed, - 0x44, 0x29, 0x57, 0x85, 0x41, 0x95, 0xd6, 0xaa, 0xd6, 0xaa, 0x59, 0xa3, 0xc2, 0x50, 0xb8, 0xd8, - 0xc4, 0x63, 0x42, 0x39, 0xf1, 0xe2, 0x69, 0xcf, 0x25, 0x7e, 0xc2, 0xbb, 0x59, 0x8c, 0x50, 0x5e, - 0x99, 0x5f, 0x36, 0x89, 0x6a, 0xa1, 0x9c, 0x01, 0xe0, 0x2c, 0x7b, 0xf4, 0x69, 0x0b, 0x4e, 0x38, - 0x77, 0x63, 0x5d, 0x6c, 0x54, 0x84, 0xca, 0x1d, 0x51, 0x49, 0xa5, 0xea, 0x97, 0x72, 0x97, 0x6f, - 0xaa, 0x09, 0xa7, 0x99, 0xa2, 0xb7, 0x2c, 0x40, 0x64, 0x8b, 0x34, 0x64, 0x30, 0x9f, 0xe8, 0xcb, - 0x40, 0x11, 0x3b, 0xcd, 0xcb, 0x5d, 0x74, 0xb9, 0x54, 0xef, 0x6e, 0xc7, 0x39, 0x7d, 0xb0, 0xff, - 0x79, 0x59, 0x2d, 0x28, 0x1d, 0x3f, 0xea, 0x18, 0x71, 0x6c, 0xd6, 0xe1, 0xe3, 0xd8, 0x74, 0x3c, - 0x40, 0x77, 0x6a, 0x62, 0x2a, 0x93, 0xa9, 0xf4, 0x80, 0x32, 0x99, 0x7e, 0xca, 0x4a, 0xd5, 0xf5, - 0x19, 0xba, 0xf4, 0x4a, 0xb1, 0xb1, 0xab, 0xe3, 0x3c, 0x56, 0x21, 0x23, 0xdd, 0xd3, 0x21, 0x2a, - 0x54, 0x9a, 0x1a, 0x68, 0x07, 0x92, 0x86, 0xff, 0xb1, 0x0c, 0x43, 0x86, 0x26, 0xcd, 0x35, 0x8b, - 0xac, 0x87, 0xcc, 0x2c, 0x2a, 0x1d, 0xc0, 0x2c, 0xfa, 0x49, 0xa8, 0x37, 0xa4, 0x94, 0x2f, 0xa6, - 0xb2, 0x6d, 0x56, 0x77, 0x68, 0x41, 0xaf, 0x9a, 0xb0, 0xe6, 0x89, 0x66, 0x53, 0x89, 0x33, 0x42, - 0x43, 0x54, 0x98, 0x86, 0xc8, 0xcb, 0x6c, 0x11, 0x9a, 0xa2, 0xfb, 0x19, 0x56, 0xfe, 0x29, 0x74, - 0xc5, 0x7b, 0xc9, 0x08, 0x73, 0x5e, 0xfe, 0x69, 0x69, 0x4e, 0x36, 0x63, 0x13, 0xc7, 0xfe, 0x96, - 0xa5, 0x3e, 0xee, 0x7d, 0x28, 0x74, 0x70, 0x27, 0x5d, 0xe8, 0xe0, 0x72, 0x21, 0xc3, 0xdc, 0xa3, - 0xc2, 0xc1, 0x0d, 0x18, 0x9c, 0x0e, 0xda, 0x6d, 0xc7, 0x6f, 0xa2, 0x1f, 0x80, 0xc1, 0x06, 0xff, - 0x29, 0x1c, 0x3b, 0xec, 
0x78, 0x50, 0x40, 0xb1, 0x84, 0xa1, 0xc7, 0xa0, 0xe2, 0x44, 0x2d, 0xe9, - 0xcc, 0x61, 0xa1, 0x2d, 0x93, 0x51, 0x2b, 0xc6, 0xac, 0xd5, 0xfe, 0xc7, 0x15, 0x80, 0xe9, 0xa0, - 0x1d, 0x3a, 0x11, 0x69, 0xae, 0x04, 0xac, 0xb2, 0xde, 0xb1, 0x1e, 0xaa, 0xe9, 0xcd, 0xd2, 0xc3, - 0x7c, 0xb0, 0x66, 0x1c, 0xae, 0x94, 0xef, 0xf3, 0xe1, 0x4a, 0x8f, 0xf3, 0xb2, 0xca, 0x43, 0x74, - 0x5e, 0x66, 0x7f, 0xce, 0x02, 0x44, 0x27, 0x4d, 0xe0, 0x13, 0x3f, 0xd1, 0x07, 0xda, 0x13, 0x50, - 0x6f, 0xc8, 0x56, 0x61, 0x58, 0x69, 0x11, 0x21, 0x01, 0x58, 0xe3, 0xf4, 0xb1, 0x43, 0x7e, 0x52, - 0xca, 0xef, 0x72, 0x3a, 0x2a, 0x96, 0x49, 0x7d, 0x21, 0xce, 0xed, 0xdf, 0x2a, 0xc1, 0x23, 0x5c, - 0x25, 0x2f, 0x38, 0xbe, 0xd3, 0x22, 0x6d, 0xda, 0xab, 0x7e, 0x43, 0x14, 0x1a, 0x74, 0x6b, 0xe6, - 0xca, 0x28, 0xd7, 0xa3, 0xae, 0x5d, 0xbe, 0xe6, 0xf8, 0x2a, 0x9b, 0xf3, 0xdd, 0x04, 0x33, 0xe2, - 0x28, 0x86, 0x9a, 0x2c, 0xe5, 0x2e, 0x64, 0x71, 0x41, 0x8c, 0x94, 0x58, 0x12, 0x7a, 0x93, 0x60, - 0xc5, 0x88, 0x1a, 0xae, 0x5e, 0xd0, 0xd8, 0xc0, 0x24, 0x0c, 0x98, 0xdc, 0x35, 0x82, 0x0c, 0xe7, - 0x45, 0x3b, 0x56, 0x18, 0xf6, 0x6f, 0x59, 0x90, 0xd5, 0x48, 0x46, 0x09, 0x33, 0x6b, 0xcf, 0x12, - 0x66, 0x07, 0xa8, 0x21, 0xf6, 0xe3, 0x30, 0xe4, 0x24, 0xd4, 0x88, 0xe0, 0xdb, 0xee, 0xf2, 0xe1, - 0x8e, 0x35, 0x16, 0x82, 0xa6, 0xbb, 0xe6, 0xb2, 0xed, 0xb6, 0x49, 0xce, 0xfe, 0x9f, 0x15, 0x38, - 0xdd, 0x95, 0x8b, 0x81, 0x5e, 0x84, 0xe1, 0x86, 0x98, 0x1e, 0xa1, 0x74, 0x68, 0xd5, 0xcd, 0xa0, - 0x34, 0x0d, 0xc3, 0x29, 0xcc, 0x3e, 0x26, 0xe8, 0x1c, 0x9c, 0x89, 0xe8, 0x46, 0xbf, 0x43, 0x26, - 0xd7, 0x12, 0x12, 0x2d, 0x93, 0x46, 0xe0, 0x37, 0x79, 0xa1, 0xbd, 0xf2, 0xd4, 0xa3, 0xbb, 0x3b, - 0x63, 0x67, 0x70, 0x37, 0x18, 0xe7, 0x3d, 0x83, 0x42, 0x38, 0xe1, 0x99, 0x36, 0xa0, 0xd8, 0x00, - 0x1c, 0xca, 0x7c, 0x54, 0x36, 0x42, 0xaa, 0x19, 0xa7, 0x19, 0xa4, 0x0d, 0xc9, 0xea, 0x03, 0x32, - 0x24, 0x3f, 0xa5, 0x0d, 0x49, 0x7e, 0xfe, 0xfe, 0xa1, 0x82, 0x73, 0x71, 0x8e, 0xdb, 0x92, 0x7c, - 0x19, 0x6a, 0x32, 0x36, 0xa9, 0xaf, 0x98, 0x1e, 0x93, 0x4e, 0x0f, 0x89, 0xf6, 0x14, 0x7c, 0xff, - 0xe5, 0x28, 0x32, 0x06, 0xf3, 0x46, 0x90, 0x4c, 0x7a, 0x5e, 0x70, 0x97, 0x2a, 0xe9, 0x9b, 0x31, - 0x11, 0x1e, 0x16, 0xfb, 0x5e, 0x09, 0x72, 0x36, 0x2b, 0x74, 0x3d, 0x6a, 0xcb, 0x20, 0xb5, 0x1e, - 0x0f, 0x66, 0x1d, 0xa0, 0x2d, 0x1e, 0xbf, 0xc5, 0x75, 0xe0, 0x07, 0x8b, 0xde, 0x6c, 0xe9, 0x90, - 0x2e, 0x95, 0xca, 0xa0, 0xc2, 0xba, 0x2e, 0x01, 0x68, 0x83, 0x4e, 0x04, 0xaa, 0xab, 0xe3, 0x61, - 0x6d, 0xf7, 0x61, 0x03, 0x8b, 0xee, 0xbd, 0x5d, 0x3f, 0x4e, 0x1c, 0xcf, 0xbb, 0xea, 0xfa, 0x89, - 0x70, 0x22, 0x2a, 0x65, 0x3f, 0xa7, 0x41, 0xd8, 0xc4, 0xbb, 0xf0, 0x1e, 0xe3, 0xfb, 0x1d, 0xe4, - 0xbb, 0xaf, 0xc3, 0xf9, 0x59, 0x37, 0x51, 0xe9, 0x15, 0x6a, 0xbe, 0x51, 0x7b, 0x4d, 0xa5, 0x0b, - 0x59, 0x3d, 0xd3, 0x85, 0x8c, 0xf4, 0x86, 0x52, 0x3a, 0x1b, 0x23, 0x9b, 0xde, 0x60, 0xbf, 0x08, - 0x67, 0x67, 0xdd, 0xe4, 0x8a, 0xeb, 0x91, 0x03, 0x32, 0xb1, 0x7f, 0x63, 0x00, 0x86, 0xcd, 0x04, - 0xbd, 0x83, 0x64, 0x3c, 0x7d, 0x81, 0x9a, 0x64, 0xe2, 0xed, 0x5c, 0x75, 0xb8, 0x76, 0xfb, 0xc8, - 0xd9, 0x82, 0xf9, 0x23, 0x66, 0x58, 0x65, 0x9a, 0x27, 0x36, 0x3b, 0x80, 0xee, 0x42, 0x75, 0x8d, - 0x85, 0xdf, 0x97, 0x8b, 0x88, 0x40, 0xc8, 0x1b, 0x51, 0xbd, 0x1c, 0x79, 0x00, 0x3f, 0xe7, 0x47, - 0x35, 0x69, 0x94, 0xce, 0xe9, 0x32, 0x42, 0x46, 0x45, 0x36, 0x97, 0xc2, 0xe8, 0xa5, 0x12, 0xaa, - 0x87, 0x50, 0x09, 0x29, 0x01, 0x3d, 0xf0, 0x80, 0x04, 0x34, 0x4b, 0xa5, 0x48, 0xd6, 0x99, 0x9d, - 0x27, 0x62, 0xdc, 0x07, 0xd9, 0x20, 0x18, 0xa9, 0x14, 0x29, 0x30, 0xce, 0xe2, 0xa3, 0x8f, 0x2b, - 0x11, 0x5f, 0x2b, 0xc2, 0xff, 0x6a, 0xce, 0xe8, 
0xe3, 0x96, 0xee, 0x9f, 0x2b, 0xc1, 0xc8, 0xac, - 0xdf, 0x59, 0x9a, 0x5d, 0xea, 0xac, 0x7a, 0x6e, 0xe3, 0x3a, 0xd9, 0xa6, 0x22, 0x7c, 0x83, 0x6c, - 0xcf, 0xcd, 0x88, 0x15, 0xa4, 0xe6, 0xcc, 0x75, 0xda, 0x88, 0x39, 0x8c, 0x0a, 0xa3, 0x35, 0xd7, - 0x6f, 0x91, 0x28, 0x8c, 0x5c, 0xe1, 0x1a, 0x35, 0x84, 0xd1, 0x15, 0x0d, 0xc2, 0x26, 0x1e, 0xa5, - 0x1d, 0xdc, 0xf5, 0x49, 0x94, 0x35, 0x78, 0x17, 0x69, 0x23, 0xe6, 0x30, 0x8a, 0x94, 0x44, 0x9d, - 0x38, 0x11, 0x93, 0x51, 0x21, 0xad, 0xd0, 0x46, 0xcc, 0x61, 0x74, 0xa5, 0xc7, 0x9d, 0x55, 0x16, - 0xe0, 0x91, 0x09, 0xa8, 0x5f, 0xe6, 0xcd, 0x58, 0xc2, 0x29, 0xea, 0x06, 0xd9, 0x9e, 0xa1, 0xbb, - 0xe3, 0x4c, 0x5e, 0xcd, 0x75, 0xde, 0x8c, 0x25, 0x9c, 0x55, 0x12, 0x4c, 0x0f, 0xc7, 0x77, 0x5d, - 0x25, 0xc1, 0x74, 0xf7, 0x7b, 0xec, 0xb3, 0x7f, 0xc9, 0x82, 0x61, 0x33, 0x2c, 0x0b, 0xb5, 0x32, - 0xb6, 0xf0, 0x62, 0x57, 0x21, 0xda, 0x1f, 0xcd, 0xbb, 0x99, 0xab, 0xe5, 0x26, 0x41, 0x18, 0x3f, - 0x4b, 0xfc, 0x96, 0xeb, 0x13, 0x76, 0xda, 0xce, 0xc3, 0xb9, 0x52, 0x31, 0x5f, 0xd3, 0x41, 0x93, - 0x1c, 0xc2, 0x98, 0xb6, 0x6f, 0xc3, 0xe9, 0xae, 0x64, 0xaa, 0x3e, 0x4c, 0x90, 0x7d, 0x53, 0x59, - 0x6d, 0x0c, 0x43, 0x94, 0xb0, 0xac, 0x66, 0x33, 0x0d, 0xa7, 0xf9, 0x42, 0xa2, 0x9c, 0x96, 0x1b, - 0xeb, 0xa4, 0xad, 0x12, 0xe4, 0x98, 0x1f, 0xfe, 0x56, 0x16, 0x88, 0xbb, 0xf1, 0xed, 0xcf, 0x5b, - 0x70, 0x22, 0x95, 0xdf, 0x56, 0x90, 0xb1, 0xc4, 0x56, 0x5a, 0xc0, 0xa2, 0x04, 0x59, 0xa8, 0x74, - 0x99, 0x29, 0x53, 0xbd, 0xd2, 0x34, 0x08, 0x9b, 0x78, 0xf6, 0x97, 0x4a, 0x50, 0x93, 0x91, 0x16, - 0x7d, 0x74, 0xe5, 0xb3, 0x16, 0x9c, 0x50, 0x67, 0x1f, 0xcc, 0xa9, 0x56, 0x2a, 0x22, 0x19, 0x81, - 0xf6, 0x40, 0x6d, 0xcb, 0xfd, 0xb5, 0x40, 0x5b, 0xee, 0xd8, 0x64, 0x86, 0xd3, 0xbc, 0xd1, 0x2d, - 0x80, 0x78, 0x3b, 0x4e, 0x48, 0xdb, 0x70, 0xef, 0xd9, 0xc6, 0x8a, 0x1b, 0x6f, 0x04, 0x11, 0xa1, - 0xeb, 0xeb, 0x46, 0xd0, 0x24, 0xcb, 0x0a, 0x53, 0x9b, 0x50, 0xba, 0x0d, 0x1b, 0x94, 0xec, 0x7f, - 0x58, 0x82, 0x53, 0xd9, 0x2e, 0xa1, 0x0f, 0xc1, 0xb0, 0xe4, 0x6e, 0xdc, 0x32, 0x26, 0xc3, 0x4b, - 0x86, 0xb1, 0x01, 0xbb, 0xb7, 0x33, 0x36, 0xd6, 0x7d, 0xcb, 0xdb, 0xb8, 0x89, 0x82, 0x53, 0xc4, - 0xf8, 0x01, 0x94, 0x38, 0x29, 0x9d, 0xda, 0x9e, 0x0c, 0x43, 0x71, 0x8a, 0x64, 0x1c, 0x40, 0x99, - 0x50, 0x9c, 0xc1, 0x46, 0x4b, 0x70, 0xd6, 0x68, 0xb9, 0x41, 0xdc, 0xd6, 0xfa, 0x6a, 0x10, 0xc9, - 0x1d, 0xd8, 0x63, 0x3a, 0x00, 0xac, 0x1b, 0x07, 0xe7, 0x3e, 0x49, 0xb5, 0x7d, 0xc3, 0x09, 0x9d, - 0x86, 0x9b, 0x6c, 0x0b, 0x7f, 0xa5, 0x92, 0x4d, 0xd3, 0xa2, 0x1d, 0x2b, 0x0c, 0x7b, 0x01, 0x2a, - 0x7d, 0xce, 0xa0, 0xbe, 0x2c, 0xff, 0x97, 0xa1, 0x46, 0xc9, 0x49, 0xf3, 0xae, 0x08, 0x92, 0x01, - 0xd4, 0xe4, 0x45, 0x21, 0xc8, 0x86, 0xb2, 0xeb, 0xc8, 0x33, 0x3e, 0xf5, 0x5a, 0x73, 0x71, 0xdc, - 0x61, 0x9b, 0x69, 0x0a, 0x44, 0x4f, 0x42, 0x99, 0x6c, 0x85, 0xd9, 0xc3, 0xbc, 0xcb, 0x5b, 0xa1, - 0x1b, 0x91, 0x98, 0x22, 0x91, 0xad, 0x10, 0x5d, 0x80, 0x92, 0xdb, 0x14, 0x4a, 0x0a, 0x04, 0x4e, - 0x69, 0x6e, 0x06, 0x97, 0xdc, 0xa6, 0xbd, 0x05, 0x75, 0x75, 0x33, 0x09, 0xda, 0x90, 0xb2, 0xdb, - 0x2a, 0x22, 0x34, 0x4a, 0xd2, 0xed, 0x21, 0xb5, 0x3b, 0x00, 0x3a, 0xd1, 0xaf, 0x28, 0xf9, 0x72, - 0x11, 0x2a, 0x8d, 0x40, 0x24, 0x21, 0xd7, 0x34, 0x19, 0x26, 0xb4, 0x19, 0xc4, 0xbe, 0x0d, 0x23, - 0xd7, 0xfd, 0xe0, 0x2e, 0x2b, 0xab, 0xce, 0xaa, 0x88, 0x51, 0xc2, 0x6b, 0xf4, 0x47, 0xd6, 0x44, - 0x60, 0x50, 0xcc, 0x61, 0xaa, 0x2e, 0x53, 0xa9, 0x57, 0x5d, 0x26, 0xfb, 0x13, 0x16, 0x0c, 0xab, - 0x8c, 0xa1, 0xd9, 0xcd, 0x0d, 0x4a, 0xb7, 0x15, 0x05, 0x9d, 0x30, 0x4b, 0x97, 0xdd, 0x1d, 0x84, - 0x39, 0xcc, 0x4c, 0xa5, 0x2b, 0xed, 0x93, 0x4a, 0x77, 0x11, 0x2a, 0x1b, 
-	// (previous gzipped FileDescriptorProto byte array)
+	// 11120 bytes of a gzipped FileDescriptorProto
+	// (regenerated gzipped FileDescriptorProto byte array)
0xaf, 0x38, + 0x2c, 0x28, 0xf9, 0x1d, 0x53, 0x52, 0xaa, 0x32, 0xb0, 0xbe, 0xc7, 0x9b, 0x36, 0x63, 0xce, 0xc8, + 0x8e, 0x00, 0x65, 0xc7, 0xe2, 0x80, 0x01, 0xac, 0x53, 0x50, 0x73, 0xba, 0x71, 0xd0, 0xa6, 0xc3, + 0x24, 0x8e, 0x9a, 0x74, 0x88, 0xae, 0x04, 0x60, 0x8d, 0x63, 0xbf, 0x51, 0x81, 0x54, 0x72, 0x22, + 0xda, 0x36, 0x6f, 0x3e, 0xb5, 0x8a, 0xbd, 0xf9, 0x54, 0x75, 0x26, 0xef, 0xf6, 0x53, 0xd4, 0x82, + 0x4a, 0x67, 0xc3, 0x89, 0xa4, 0x59, 0xfd, 0x92, 0xda, 0xc7, 0xd1, 0xc6, 0xbb, 0xbb, 0x13, 0x3f, + 0xde, 0x9f, 0xd7, 0x95, 0xce, 0xd5, 0x29, 0x5e, 0xe6, 0x44, 0xb3, 0x66, 0x34, 0x30, 0xa7, 0x7f, + 0x90, 0xbb, 0xfd, 0x3e, 0x29, 0xea, 0xf0, 0x63, 0x12, 0x75, 0xbd, 0x58, 0xcc, 0x86, 0x97, 0x0a, + 0x5c, 0x65, 0x9c, 0xb0, 0x4e, 0xab, 0xe7, 0xff, 0xb1, 0xc1, 0x14, 0x7d, 0x08, 0x6a, 0x51, 0xec, + 0x84, 0xf1, 0x21, 0x13, 0x61, 0xd5, 0xa0, 0xaf, 0x48, 0x22, 0x58, 0xd3, 0x43, 0x2f, 0xb3, 0xea, + 0xb6, 0x6e, 0xb4, 0x71, 0xc8, 0xdc, 0x08, 0x59, 0x09, 0x57, 0x50, 0xc0, 0x06, 0x35, 0x74, 0x01, + 0x80, 0xcd, 0x6d, 0x1e, 0x10, 0x58, 0x65, 0x5e, 0x26, 0x25, 0x0a, 0xb1, 0x82, 0x60, 0x03, 0xcb, + 0xfe, 0x11, 0x48, 0xd6, 0x85, 0x40, 0x13, 0xb2, 0x0c, 0x05, 0xf7, 0x42, 0xb3, 0x1c, 0x87, 0x44, + 0xc5, 0x88, 0x5f, 0xb3, 0xc0, 0x2c, 0x5e, 0x81, 0x5e, 0xe3, 0x55, 0x32, 0xac, 0x22, 0x4e, 0x0e, + 0x0d, 0xba, 0x93, 0x8b, 0x4e, 0x27, 0x75, 0x84, 0x2d, 0x4b, 0x65, 0x9c, 0x7b, 0x37, 0x54, 0x25, + 0xf4, 0x40, 0x46, 0xdd, 0xc7, 0xe1, 0x74, 0xfa, 0x5e, 0x78, 0x71, 0xea, 0xb4, 0xbf, 0xeb, 0x47, + 0xfa, 0x73, 0x4a, 0xbd, 0xfc, 0x39, 0x7d, 0xdc, 0x7f, 0xfb, 0xeb, 0x16, 0x9c, 0xdf, 0xef, 0xfa, + 0x7a, 0xf4, 0x28, 0x0c, 0xdc, 0x76, 0x42, 0x59, 0x76, 0x9c, 0x09, 0xca, 0x9b, 0x4e, 0xe8, 0x63, + 0xd6, 0x8a, 0x76, 0x60, 0x90, 0x47, 0x8d, 0x09, 0x6b, 0xfd, 0xa5, 0x62, 0x2f, 0xd3, 0xbf, 0x4a, + 0x8c, 0xed, 0x02, 0x8f, 0x58, 0xc3, 0x82, 0xa1, 0xfd, 0x1d, 0x0b, 0xd0, 0xd2, 0x16, 0x09, 0x43, + 0xb7, 0x69, 0xc4, 0xb9, 0xb1, 0xfb, 0x6c, 0x8c, 0x7b, 0x6b, 0xcc, 0x54, 0xd8, 0xd4, 0x7d, 0x36, + 0xc6, 0xbf, 0xfc, 0xfb, 0x6c, 0x4a, 0x07, 0xbb, 0xcf, 0x06, 0x2d, 0xc1, 0xd9, 0x36, 0xdf, 0x6e, + 0xf0, 0x3b, 0x22, 0xf8, 0xde, 0x43, 0x25, 0x9e, 0x3d, 0x72, 0x67, 0x77, 0xe2, 0xec, 0x62, 0x1e, + 0x02, 0xce, 0x7f, 0xce, 0x7e, 0x37, 0x20, 0x1e, 0xde, 0x36, 0x93, 0x17, 0xab, 0xd4, 0xd3, 0xfd, + 0x62, 0x7f, 0xb5, 0x02, 0x27, 0x52, 0x45, 0x69, 0xe9, 0x56, 0x2f, 0x1b, 0x1c, 0x75, 0x64, 0xfd, + 0x9d, 0xed, 0x5e, 0x5f, 0xe1, 0x56, 0x3e, 0x54, 0x5c, 0xbf, 0xd3, 0x8d, 0x8b, 0xc9, 0x35, 0xe5, + 0x9d, 0x98, 0xa7, 0x04, 0x0d, 0x77, 0x31, 0xfd, 0x8b, 0x39, 0x9b, 0x22, 0x83, 0xb7, 0x12, 0xc6, + 0xf8, 0xc0, 0x7d, 0x72, 0x07, 0x7c, 0x52, 0x87, 0x52, 0x55, 0x8a, 0x70, 0x2c, 0xa6, 0x26, 0xcb, + 0x71, 0x1f, 0xb5, 0xff, 0x6a, 0x09, 0x86, 0x8d, 0x8f, 0x86, 0x7e, 0x31, 0x59, 0xda, 0xc9, 0x2a, + 0xee, 0x95, 0x18, 0xfd, 0x49, 0x5d, 0xbc, 0x89, 0xbf, 0xd2, 0x93, 0xd9, 0xaa, 0x4e, 0x77, 0x77, + 0x27, 0x4e, 0xa6, 0xea, 0x36, 0x25, 0x2a, 0x3d, 0x9d, 0xfb, 0x18, 0x9c, 0x48, 0x91, 0xc9, 0x79, + 0xe5, 0xd5, 0xe4, 0xb5, 0xff, 0x47, 0x74, 0x4b, 0x99, 0x43, 0xf6, 0x4d, 0x3a, 0x64, 0x22, 0xdd, + 0x2e, 0xf0, 0x48, 0x1f, 0x3e, 0xd8, 0x54, 0x56, 0x6d, 0xa9, 0xcf, 0xac, 0xda, 0xa7, 0xa0, 0xda, + 0x09, 0x3c, 0xb7, 0xe1, 0xaa, 0xfa, 0x87, 0x2c, 0x8f, 0x77, 0x59, 0xb4, 0x61, 0x05, 0x45, 0xb7, + 0xa1, 0x76, 0xeb, 0x76, 0xcc, 0x4f, 0x7f, 0x84, 0x7f, 0xbb, 0xa8, 0x43, 0x1f, 0x65, 0xb4, 0xa8, + 0xe3, 0x25, 0xac, 0x79, 0x21, 0x1b, 0x06, 0x99, 0x12, 0x94, 0x29, 0x02, 0xcc, 0xf7, 0xce, 0xb4, + 0x63, 0x84, 0x05, 0xc4, 0xfe, 0x46, 0x0d, 0xce, 0xe4, 0x55, 0x06, 0x47, 0x1f, 0x85, 0x41, 0xde, + 0xc7, 0x62, 
0x2e, 0x9f, 0xc8, 0xe3, 0x31, 0xc7, 0x08, 0x8a, 0x6e, 0xb1, 0xdf, 0x58, 0xf0, 0x14, + 0xdc, 0x3d, 0x67, 0x4d, 0xcc, 0x90, 0xe3, 0xe1, 0xbe, 0xe0, 0x68, 0xee, 0x0b, 0x0e, 0xe7, 0xee, + 0x39, 0x6b, 0x68, 0x1b, 0x2a, 0x2d, 0x37, 0x26, 0x8e, 0x70, 0x22, 0xdc, 0x3c, 0x16, 0xe6, 0xc4, + 0xe1, 0x56, 0x1a, 0xfb, 0x89, 0x39, 0x43, 0xf4, 0x75, 0x0b, 0x4e, 0xac, 0x25, 0x53, 0xe8, 0x85, + 0xf0, 0x74, 0x8e, 0xa1, 0xfa, 0x7b, 0x92, 0x11, 0xbf, 0xd0, 0x29, 0xd5, 0x88, 0xd3, 0xdd, 0x41, + 0x9f, 0xb2, 0x60, 0x68, 0xdd, 0xf5, 0x8c, 0x02, 0xbc, 0xc7, 0xf0, 0x71, 0x2e, 0x31, 0x06, 0x7a, + 0xc7, 0xc1, 0xff, 0x47, 0x58, 0x72, 0xee, 0xa5, 0xa9, 0x06, 0x8f, 0xaa, 0xa9, 0x86, 0xee, 0x93, + 0xa6, 0xfa, 0xac, 0x05, 0x35, 0x35, 0xd2, 0x22, 0x2d, 0xfa, 0x43, 0xc7, 0xf8, 0xc9, 0xb9, 0xe7, + 0x44, 0xfd, 0xc5, 0x9a, 0x39, 0xfa, 0x92, 0x05, 0xc3, 0xce, 0xeb, 0xdd, 0x90, 0x34, 0xc9, 0x56, + 0xd0, 0x89, 0xc4, 0x6d, 0x90, 0xaf, 0x14, 0xdf, 0x99, 0x69, 0xca, 0x64, 0x96, 0x6c, 0x2d, 0x75, + 0x22, 0x91, 0xbe, 0xa4, 0x1b, 0xb0, 0xd9, 0x05, 0x7b, 0xb7, 0x04, 0x13, 0xfb, 0x50, 0x40, 0x2f, + 0xc0, 0x48, 0x10, 0xb6, 0x1c, 0xdf, 0x7d, 0xdd, 0xac, 0x89, 0xa1, 0xac, 0xac, 0x25, 0x03, 0x86, + 0x13, 0x98, 0x66, 0xe2, 0x76, 0x69, 0x9f, 0xc4, 0xed, 0xf3, 0x30, 0x10, 0x92, 0x4e, 0x90, 0xde, + 0x2c, 0xb0, 0xd4, 0x01, 0x06, 0x41, 0x8f, 0x41, 0xd9, 0xe9, 0xb8, 0x22, 0x10, 0x4d, 0xed, 0x81, + 0xa6, 0x97, 0xe7, 0x31, 0x6d, 0x4f, 0xd4, 0x91, 0xa8, 0xdc, 0x93, 0x3a, 0x12, 0x54, 0x0d, 0x88, + 0xb3, 0x8b, 0x41, 0xad, 0x06, 0x92, 0x67, 0x0a, 0xf6, 0x9b, 0x65, 0x78, 0x6c, 0xcf, 0xf9, 0xa2, + 0xe3, 0xf0, 0xac, 0x3d, 0xe2, 0xf0, 0xe4, 0xf0, 0x94, 0xf6, 0x1b, 0x9e, 0x72, 0x8f, 0xe1, 0xf9, + 0x14, 0x5d, 0x06, 0xb2, 0x96, 0x48, 0x31, 0xf7, 0xf9, 0xf5, 0x2a, 0x4d, 0x22, 0x56, 0x80, 0x84, + 0x62, 0xcd, 0x97, 0xee, 0x01, 0x12, 0x49, 0xcb, 0x95, 0x22, 0xd4, 0x40, 0xcf, 0xda, 0x22, 0x7c, + 0xee, 0xf7, 0xca, 0x84, 0xb6, 0x7f, 0xae, 0x04, 0x4f, 0xf4, 0x21, 0xbd, 0xcd, 0x59, 0x6c, 0xf5, + 0x39, 0x8b, 0xbf, 0xb7, 0x3f, 0x93, 0xfd, 0x57, 0x2d, 0x38, 0xd7, 0x5b, 0x79, 0xa0, 0x67, 0x61, + 0x78, 0x2d, 0x74, 0xfc, 0xc6, 0x06, 0xbb, 0xa3, 0x54, 0x0e, 0x0a, 0x1b, 0x6b, 0xdd, 0x8c, 0x4d, + 0x1c, 0xba, 0xbd, 0xe5, 0x31, 0x09, 0x06, 0x86, 0x4c, 0x32, 0xa5, 0xdb, 0xdb, 0xd5, 0x34, 0x10, + 0x67, 0xf1, 0xed, 0x3f, 0x2b, 0xe5, 0x77, 0x8b, 0x1b, 0x19, 0x07, 0xf9, 0x4e, 0xe2, 0x2b, 0x94, + 0xfa, 0x90, 0x25, 0xe5, 0x7b, 0x2d, 0x4b, 0x06, 0x7a, 0xc9, 0x12, 0x34, 0x0b, 0x27, 0x8d, 0x4b, + 0x64, 0x78, 0xe2, 0x30, 0x0f, 0xb8, 0x55, 0xd5, 0x34, 0x96, 0x53, 0x70, 0x9c, 0x79, 0x02, 0x3d, + 0x0d, 0x55, 0xd7, 0x8f, 0x48, 0xa3, 0x1b, 0xf2, 0x40, 0x6f, 0x23, 0x59, 0x6b, 0x5e, 0xb4, 0x63, + 0x85, 0x61, 0xff, 0x52, 0x09, 0x1e, 0xe9, 0x69, 0x67, 0xdd, 0x23, 0xd9, 0x65, 0x7e, 0x8e, 0x81, + 0x7b, 0xf3, 0x39, 0xcc, 0x41, 0xaa, 0xec, 0x3b, 0x48, 0x7f, 0xd0, 0x7b, 0x62, 0x52, 0x9b, 0xfb, + 0xfb, 0x76, 0x94, 0x5e, 0x84, 0x51, 0xa7, 0xd3, 0xe1, 0x78, 0x2c, 0x5e, 0x33, 0x55, 0x4d, 0x67, + 0xda, 0x04, 0xe2, 0x24, 0x6e, 0x5f, 0xda, 0xf3, 0x8f, 0x2c, 0xa8, 0x61, 0xb2, 0xce, 0xa5, 0x03, + 0xba, 0x25, 0x86, 0xc8, 0x2a, 0xa2, 0xee, 0x26, 0x1d, 0xd8, 0xc8, 0x65, 0xf5, 0x28, 0xf3, 0x06, + 0x3b, 0x7b, 0xc9, 0x4f, 0xe9, 0x40, 0x97, 0xfc, 0xa8, 0x6b, 0x5e, 0xca, 0xbd, 0xaf, 0x79, 0xb1, + 0xbf, 0x39, 0x44, 0x5f, 0xaf, 0x13, 0xcc, 0x84, 0xa4, 0x19, 0xd1, 0xef, 0xdb, 0x0d, 0x3d, 0x31, + 0x49, 0xd4, 0xf7, 0xbd, 0x8e, 0x17, 0x30, 0x6d, 0x4f, 0x1c, 0xc5, 0x94, 0x0e, 0x54, 0x4b, 0xa4, + 0xbc, 0x6f, 0x2d, 0x91, 0x17, 0x61, 0x34, 0x8a, 0x36, 0x96, 0x43, 0x77, 0xcb, 0x89, 0xc9, 0x55, + 0xb2, 0x23, 0xac, 0x2c, 0x9d, 0xff, 
0xbf, 0x72, 0x59, 0x03, 0x71, 0x12, 0x17, 0xcd, 0xc1, 0x29, + 0x5d, 0xd1, 0x83, 0x84, 0x31, 0x8b, 0xee, 0xe7, 0x33, 0x41, 0x25, 0xfb, 0xea, 0x1a, 0x20, 0x02, + 0x01, 0x67, 0x9f, 0xa1, 0xf2, 0x2d, 0xd1, 0x48, 0x3b, 0x32, 0x98, 0x94, 0x6f, 0x09, 0x3a, 0xb4, + 0x2f, 0x99, 0x27, 0xd0, 0x22, 0x9c, 0xe6, 0x13, 0x63, 0xba, 0xd3, 0x31, 0xde, 0x68, 0x28, 0x59, + 0xef, 0x70, 0x2e, 0x8b, 0x82, 0xf3, 0x9e, 0x43, 0xcf, 0xc3, 0xb0, 0x6a, 0x9e, 0x9f, 0x15, 0xa7, + 0x08, 0xca, 0x8b, 0xa1, 0xc8, 0xcc, 0x37, 0xb1, 0x89, 0x87, 0x3e, 0x08, 0x0f, 0xeb, 0xbf, 0x3c, + 0x05, 0x8c, 0x1f, 0xad, 0xcd, 0x8a, 0x62, 0x49, 0xea, 0x52, 0x91, 0xb9, 0x5c, 0xb4, 0x26, 0xee, + 0xf5, 0x3c, 0x5a, 0x83, 0x73, 0x0a, 0x74, 0xd1, 0x8f, 0x59, 0x3e, 0x47, 0x44, 0xea, 0x4e, 0x44, + 0xae, 0x87, 0x9e, 0xb8, 0x9c, 0x56, 0xdd, 0x3b, 0x39, 0xe7, 0xc6, 0x97, 0xf3, 0x30, 0xf1, 0x02, + 0xde, 0x83, 0x0a, 0x9a, 0x82, 0x1a, 0xf1, 0x9d, 0x35, 0x8f, 0x2c, 0xcd, 0xcc, 0xb3, 0xa2, 0x4b, + 0xc6, 0x49, 0xde, 0x45, 0x09, 0xc0, 0x1a, 0x47, 0x45, 0x98, 0x8e, 0xf4, 0xbc, 0x03, 0x75, 0x19, + 0xce, 0xb4, 0x1a, 0x1d, 0x6a, 0x7b, 0xb8, 0x0d, 0x32, 0xdd, 0x60, 0x01, 0x75, 0xf4, 0xc3, 0xf0, + 0x42, 0x94, 0x2a, 0x7c, 0x7a, 0x6e, 0x66, 0x39, 0x83, 0x83, 0x73, 0x9f, 0x64, 0x81, 0x97, 0x61, + 0xb0, 0xbd, 0x33, 0x7e, 0x3a, 0x15, 0x78, 0x49, 0x1b, 0x31, 0x87, 0xa1, 0x2b, 0x80, 0x58, 0x2c, + 0xfe, 0xe5, 0x38, 0xee, 0x28, 0x63, 0x67, 0xfc, 0x0c, 0x7b, 0x25, 0x15, 0x46, 0x76, 0x29, 0x83, + 0x81, 0x73, 0x9e, 0xb2, 0xff, 0xa3, 0x05, 0xa3, 0x6a, 0xbd, 0xde, 0x83, 0x6c, 0x14, 0x2f, 0x99, + 0x8d, 0x32, 0x77, 0x74, 0x89, 0xc7, 0x7a, 0xde, 0x23, 0xa4, 0xf9, 0x33, 0xc3, 0x00, 0x5a, 0x2a, + 0x2a, 0x85, 0x64, 0xf5, 0x54, 0x48, 0x0f, 0xac, 0x44, 0xca, 0xab, 0xb0, 0x52, 0xb9, 0xbf, 0x15, + 0x56, 0x56, 0xe0, 0xac, 0x34, 0x17, 0xf8, 0x59, 0xd1, 0xe5, 0x20, 0x52, 0x02, 0xae, 0x5a, 0x7f, + 0x4c, 0x10, 0x3a, 0x3b, 0x9f, 0x87, 0x84, 0xf3, 0x9f, 0x4d, 0x58, 0x29, 0x43, 0xfb, 0x59, 0x29, + 0x7a, 0x4d, 0x2f, 0xac, 0xcb, 0xdb, 0x43, 0x52, 0x6b, 0x7a, 0xe1, 0xd2, 0x0a, 0xd6, 0x38, 0xf9, + 0x82, 0xbd, 0x56, 0x90, 0x60, 0x87, 0x03, 0x0b, 0x76, 0x29, 0x62, 0x86, 0x7b, 0x8a, 0x18, 0xe9, + 0x93, 0x1e, 0xe9, 0xe9, 0x93, 0x7e, 0x1f, 0x8c, 0xb9, 0xfe, 0x06, 0x09, 0xdd, 0x98, 0x34, 0xd9, + 0x5a, 0x60, 0xe2, 0xa7, 0xaa, 0xd5, 0xfa, 0x7c, 0x02, 0x8a, 0x53, 0xd8, 0x49, 0xb9, 0x38, 0xd6, + 0x87, 0x5c, 0xec, 0xa1, 0x8d, 0x4e, 0x14, 0xa3, 0x8d, 0x4e, 0x1e, 0x5d, 0x1b, 0x9d, 0x3a, 0x56, + 0x6d, 0x84, 0x0a, 0xd1, 0x46, 0x7d, 0x09, 0x7a, 0x63, 0xfb, 0x77, 0x66, 0x9f, 0xed, 0x5f, 0x2f, + 0x55, 0x74, 0xf6, 0xd0, 0xaa, 0x28, 0x5f, 0xcb, 0x3c, 0x74, 0x28, 0x2d, 0xf3, 0xd9, 0x12, 0x9c, + 0xd5, 0x72, 0x98, 0xce, 0x7e, 0x77, 0x9d, 0x4a, 0x22, 0x76, 0x01, 0x15, 0x3f, 0xb7, 0x31, 0x92, + 0xa3, 0x74, 0x9e, 0x95, 0x82, 0x60, 0x03, 0x8b, 0xe5, 0x18, 0x91, 0x90, 0x95, 0xdb, 0x4d, 0x0b, + 0xe9, 0x19, 0xd1, 0x8e, 0x15, 0x06, 0x9d, 0x5f, 0xf4, 0xb7, 0xc8, 0xdb, 0x4c, 0x17, 0x95, 0x9b, + 0xd1, 0x20, 0x6c, 0xe2, 0xa1, 0xa7, 0x38, 0x13, 0x26, 0x20, 0xa8, 0xa0, 0x1e, 0x11, 0x37, 0xe3, + 0x4a, 0x99, 0xa0, 0xa0, 0xb2, 0x3b, 0x2c, 0x99, 0xac, 0x92, 0xed, 0x0e, 0x0b, 0x81, 0x52, 0x18, + 0xf6, 0xff, 0xb4, 0xe0, 0x91, 0xdc, 0xa1, 0xb8, 0x07, 0xca, 0x77, 0x3b, 0xa9, 0x7c, 0x57, 0x8a, + 0xda, 0x6e, 0x18, 0x6f, 0xd1, 0x43, 0x11, 0xff, 0x7b, 0x0b, 0xc6, 0x34, 0xfe, 0x3d, 0x78, 0x55, + 0x37, 0xf9, 0xaa, 0xc5, 0xed, 0xac, 0x6a, 0x99, 0x77, 0xfb, 0xad, 0x12, 0xa8, 0x42, 0x8f, 0xd3, + 0x0d, 0x59, 0x46, 0x77, 0x9f, 0x93, 0xc4, 0x1d, 0x18, 0x64, 0x07, 0xa1, 0x51, 0x31, 0x41, 0x1e, + 0x49, 0xfe, 0xec, 0x50, 0x55, 0x1f, 0x32, 0xb3, 0xbf, 0x11, 
0x16, 0x0c, 0x59, 0x31, 0x68, 0x37, + 0xa2, 0xd2, 0xbc, 0x29, 0xd2, 0xb2, 0x74, 0x31, 0x68, 0xd1, 0x8e, 0x15, 0x06, 0x55, 0x0f, 0x6e, + 0x23, 0xf0, 0x67, 0x3c, 0x27, 0x92, 0xb7, 0x2e, 0x2a, 0xf5, 0x30, 0x2f, 0x01, 0x58, 0xe3, 0xb0, + 0x33, 0x52, 0x37, 0xea, 0x78, 0xce, 0x8e, 0xb1, 0x7f, 0x36, 0xea, 0x13, 0x28, 0x10, 0x36, 0xf1, + 0xec, 0x36, 0x8c, 0x27, 0x5f, 0x62, 0x96, 0xac, 0xb3, 0x00, 0xc5, 0xbe, 0x86, 0x73, 0x0a, 0x6a, + 0x0e, 0x7b, 0x6a, 0xa1, 0xeb, 0xa4, 0x2f, 0x6d, 0x9f, 0x96, 0x00, 0xac, 0x71, 0xec, 0x5f, 0xb1, + 0xe0, 0x74, 0xce, 0xa0, 0x15, 0x98, 0xf6, 0x16, 0x6b, 0x69, 0x93, 0xa7, 0xd8, 0x7f, 0x18, 0x86, + 0x9a, 0x64, 0xdd, 0x91, 0x21, 0x70, 0x86, 0x6c, 0x9f, 0xe5, 0xcd, 0x58, 0xc2, 0xed, 0xff, 0x6e, + 0xc1, 0x89, 0x64, 0x5f, 0x23, 0x96, 0x4a, 0xc2, 0x87, 0xc9, 0x8d, 0x1a, 0xc1, 0x16, 0x09, 0x77, + 0xe8, 0x9b, 0x5b, 0xa9, 0x54, 0x92, 0x0c, 0x06, 0xce, 0x79, 0x8a, 0x95, 0x79, 0x6d, 0xaa, 0xd1, + 0x96, 0x33, 0xf2, 0x46, 0x91, 0x33, 0x52, 0x7f, 0x4c, 0xf3, 0xb8, 0x5c, 0xb1, 0xc4, 0x26, 0x7f, + 0xfb, 0x3b, 0x03, 0xa0, 0xf2, 0x62, 0x59, 0xfc, 0x51, 0x41, 0xd1, 0x5b, 0x07, 0xcd, 0x20, 0x52, + 0x93, 0x61, 0x60, 0xaf, 0x80, 0x00, 0xee, 0x25, 0x31, 0x5d, 0x97, 0xea, 0x0d, 0x57, 0x35, 0x08, + 0x9b, 0x78, 0xb4, 0x27, 0x9e, 0xbb, 0x45, 0xf8, 0x43, 0x83, 0xc9, 0x9e, 0x2c, 0x48, 0x00, 0xd6, + 0x38, 0xb4, 0x27, 0x4d, 0x77, 0x7d, 0x5d, 0x6c, 0xf9, 0x55, 0x4f, 0xe8, 0xe8, 0x60, 0x06, 0xe1, + 0x95, 0xbb, 0x83, 0x4d, 0x61, 0x05, 0x1b, 0x95, 0xbb, 0x83, 0x4d, 0xcc, 0x20, 0xd4, 0x6e, 0xf3, + 0x83, 0xb0, 0xcd, 0x2e, 0xd5, 0x6f, 0x2a, 0x2e, 0xc2, 0xfa, 0x55, 0x76, 0xdb, 0xb5, 0x2c, 0x0a, + 0xce, 0x7b, 0x8e, 0xce, 0xc0, 0x4e, 0x48, 0x9a, 0x6e, 0x23, 0x36, 0xa9, 0x41, 0x72, 0x06, 0x2e, + 0x67, 0x30, 0x70, 0xce, 0x53, 0x68, 0x1a, 0x4e, 0xc8, 0xbc, 0x66, 0x59, 0xb5, 0x66, 0x38, 0x59, + 0x25, 0x03, 0x27, 0xc1, 0x38, 0x8d, 0x4f, 0xa5, 0x5a, 0x5b, 0x14, 0xb6, 0x62, 0xc6, 0xb2, 0x21, + 0xd5, 0x64, 0xc1, 0x2b, 0xac, 0x30, 0xec, 0x4f, 0x96, 0xa9, 0x16, 0xee, 0x51, 0xd0, 0xed, 0x9e, + 0x45, 0x0b, 0x26, 0x67, 0xe4, 0x40, 0x1f, 0x33, 0xf2, 0x39, 0x18, 0xb9, 0x15, 0x05, 0xbe, 0x8a, + 0xc4, 0xab, 0xf4, 0x8c, 0xc4, 0x33, 0xb0, 0xf2, 0x23, 0xf1, 0x06, 0x8b, 0x8a, 0xc4, 0x1b, 0x3a, + 0x64, 0x24, 0xde, 0x6f, 0x57, 0x40, 0x5d, 0x21, 0x72, 0x8d, 0xc4, 0xb7, 0x83, 0x70, 0xd3, 0xf5, + 0x5b, 0x2c, 0x1f, 0xfc, 0xeb, 0x16, 0x8c, 0xf0, 0xf5, 0xb2, 0x60, 0x66, 0x52, 0xad, 0x17, 0x74, + 0x37, 0x45, 0x82, 0xd9, 0xe4, 0xaa, 0xc1, 0x28, 0x75, 0xe9, 0xa7, 0x09, 0xc2, 0x89, 0x1e, 0xa1, + 0x8f, 0x01, 0x48, 0xff, 0xe8, 0xba, 0x14, 0x99, 0xf3, 0xc5, 0xf4, 0x0f, 0x93, 0x75, 0x6d, 0x03, + 0xaf, 0x2a, 0x26, 0xd8, 0x60, 0x88, 0x3e, 0xab, 0xb3, 0xcc, 0x78, 0xc8, 0xfe, 0x47, 0x8e, 0x65, + 0x6c, 0xfa, 0xc9, 0x31, 0xc3, 0x30, 0xe4, 0xfa, 0x2d, 0x3a, 0x4f, 0x44, 0xc4, 0xd2, 0x3b, 0xf2, + 0x6a, 0x29, 0x2c, 0x04, 0x4e, 0xb3, 0xee, 0x78, 0x8e, 0xdf, 0x20, 0xe1, 0x3c, 0x47, 0x37, 0xaf, + 0xba, 0x66, 0x0d, 0x58, 0x12, 0xca, 0x5c, 0xbe, 0x52, 0xe9, 0xe7, 0xf2, 0x95, 0x73, 0xef, 0x87, + 0x53, 0x99, 0x8f, 0x79, 0xa0, 0x94, 0xb2, 0xc3, 0x67, 0xa3, 0xd9, 0xff, 0x7c, 0x50, 0x2b, 0xad, + 0x6b, 0x41, 0x93, 0x5f, 0x01, 0x12, 0xea, 0x2f, 0x2a, 0x6c, 0xdc, 0x02, 0xa7, 0x88, 0x71, 0x5d, + 0xb6, 0x6a, 0xc4, 0x26, 0x4b, 0x3a, 0x47, 0x3b, 0x4e, 0x48, 0xfc, 0xe3, 0x9e, 0xa3, 0xcb, 0x8a, + 0x09, 0x36, 0x18, 0xa2, 0x8d, 0x44, 0x4e, 0xc9, 0xa5, 0xa3, 0xe7, 0x94, 0xb0, 0x2a, 0x53, 0x79, + 0x55, 0xfb, 0xbf, 0x64, 0xc1, 0x98, 0x9f, 0x98, 0xb9, 0xc5, 0x84, 0x91, 0xe6, 0xaf, 0x0a, 0x7e, + 0x03, 0x55, 0xb2, 0x0d, 0xa7, 0xf8, 0xe7, 0xa9, 0xb4, 0xca, 0x01, 0x55, 0x9a, 0xbe, 
0x4b, 0x68, + 0xb0, 0xd7, 0x5d, 0x42, 0xc8, 0x57, 0x97, 0xa9, 0x0d, 0x15, 0x7e, 0x99, 0x1a, 0xe4, 0x5c, 0xa4, + 0x76, 0x13, 0x6a, 0x8d, 0x90, 0x38, 0xf1, 0x21, 0xef, 0xd5, 0x62, 0x07, 0xf4, 0x33, 0x92, 0x00, + 0xd6, 0xb4, 0xec, 0xff, 0x33, 0x00, 0x27, 0xe5, 0x88, 0xc8, 0x10, 0x74, 0xaa, 0x1f, 0x39, 0x5f, + 0x6d, 0xdc, 0x2a, 0xfd, 0x78, 0x59, 0x02, 0xb0, 0xc6, 0xa1, 0xf6, 0x58, 0x37, 0x22, 0x4b, 0x1d, + 0xe2, 0x2f, 0xb8, 0x6b, 0x91, 0x38, 0xe7, 0x54, 0x0b, 0xe5, 0xba, 0x06, 0x61, 0x13, 0x8f, 0x1a, + 0xe3, 0xdc, 0x2e, 0x8e, 0xd2, 0xe9, 0x2b, 0xc2, 0xde, 0xc6, 0x12, 0x8e, 0x7e, 0x3e, 0xb7, 0xc2, + 0x6c, 0x31, 0x89, 0x5b, 0x99, 0xc8, 0xfb, 0x03, 0x5e, 0xc5, 0xf8, 0xb7, 0x2c, 0x38, 0xcb, 0x5b, + 0xe5, 0x48, 0x5e, 0xef, 0x34, 0x9d, 0x98, 0x44, 0xc5, 0x54, 0x7c, 0xcf, 0xe9, 0x9f, 0x76, 0xf2, + 0xe6, 0xb1, 0xc5, 0xf9, 0xbd, 0x41, 0x6f, 0x58, 0x70, 0x62, 0x33, 0x51, 0xf3, 0x43, 0xaa, 0x8e, + 0xa3, 0xa6, 0xe3, 0x27, 0x88, 0xea, 0xa5, 0x96, 0x6c, 0x8f, 0x70, 0x9a, 0xbb, 0xfd, 0x67, 0x16, + 0x98, 0x62, 0xf4, 0xde, 0x97, 0x0a, 0x39, 0xb8, 0x29, 0x28, 0xad, 0xcb, 0x4a, 0x4f, 0xeb, 0xf2, + 0x31, 0x28, 0x77, 0xdd, 0xa6, 0xd8, 0x5f, 0xe8, 0xd3, 0xd7, 0xf9, 0x59, 0x4c, 0xdb, 0xed, 0x7f, + 0x52, 0xd1, 0x7e, 0x0b, 0x91, 0x17, 0xf5, 0x7d, 0xf1, 0xda, 0xeb, 0xaa, 0xd8, 0x18, 0x7f, 0xf3, + 0x6b, 0x99, 0x62, 0x63, 0x3f, 0x76, 0xf0, 0xb4, 0x37, 0x3e, 0x40, 0xbd, 0x6a, 0x8d, 0x0d, 0xed, + 0x93, 0xf3, 0x76, 0x0b, 0xaa, 0x74, 0x0b, 0xc6, 0x1c, 0x90, 0xd5, 0x44, 0xa7, 0xaa, 0x97, 0x45, + 0xfb, 0xdd, 0xdd, 0x89, 0xf7, 0x1c, 0xbc, 0x5b, 0xf2, 0x69, 0xac, 0xe8, 0xa3, 0x08, 0x6a, 0xf4, + 0x37, 0x4b, 0xcf, 0x13, 0x9b, 0xbb, 0xeb, 0x4a, 0x66, 0x4a, 0x40, 0x21, 0xb9, 0x7f, 0x9a, 0x0f, + 0xf2, 0xa1, 0xc6, 0x6e, 0xad, 0x65, 0x4c, 0xf9, 0x1e, 0x70, 0x59, 0x25, 0xc9, 0x49, 0xc0, 0xdd, + 0xdd, 0x89, 0x17, 0x0f, 0xce, 0x54, 0x3d, 0x8e, 0x35, 0x0b, 0xfb, 0xcb, 0x03, 0x7a, 0xee, 0x8a, + 0x1a, 0x73, 0xdf, 0x17, 0x73, 0xf7, 0x85, 0xd4, 0xdc, 0x3d, 0x9f, 0x99, 0xbb, 0x63, 0xfa, 0x76, + 0xd5, 0xc4, 0x6c, 0xbc, 0xd7, 0x86, 0xc0, 0xfe, 0xfe, 0x06, 0x66, 0x01, 0xbd, 0xd6, 0x75, 0x43, + 0x12, 0x2d, 0x87, 0x5d, 0xdf, 0xf5, 0x5b, 0x6c, 0x3a, 0x56, 0x4d, 0x0b, 0x28, 0x01, 0xc6, 0x69, + 0x7c, 0xba, 0xa9, 0xa7, 0xdf, 0xfc, 0xa6, 0xb3, 0xc5, 0x67, 0x95, 0x51, 0x76, 0x6b, 0x45, 0xb4, + 0x63, 0x85, 0x61, 0x7f, 0x93, 0x9d, 0x65, 0x1b, 0x79, 0xc1, 0x74, 0x4e, 0x78, 0xec, 0x9a, 0x60, + 0x5e, 0xb3, 0x4b, 0xcd, 0x09, 0x7e, 0x37, 0x30, 0x87, 0xa1, 0xdb, 0x30, 0xb4, 0xc6, 0xef, 0xc9, + 0x2b, 0xa6, 0x8e, 0xb9, 0xb8, 0x74, 0x8f, 0xdd, 0x86, 0x22, 0x6f, 0xe0, 0xbb, 0xab, 0x7f, 0x62, + 0xc9, 0xcd, 0xfe, 0xbd, 0x0a, 0x9c, 0x48, 0x5d, 0x24, 0x9b, 0xa8, 0x96, 0x5a, 0xda, 0xb7, 0x5a, + 0xea, 0x87, 0x01, 0x9a, 0xa4, 0xe3, 0x05, 0x3b, 0xcc, 0x1c, 0x1b, 0x38, 0xb0, 0x39, 0xa6, 0x2c, + 0xf8, 0x59, 0x45, 0x05, 0x1b, 0x14, 0x45, 0xa1, 0x32, 0x5e, 0x7c, 0x35, 0x55, 0xa8, 0xcc, 0xb8, + 0xed, 0x60, 0xf0, 0xde, 0xde, 0x76, 0xe0, 0xc2, 0x09, 0xde, 0x45, 0x95, 0x7d, 0x7b, 0x88, 0x24, + 0x5b, 0x96, 0xbf, 0x30, 0x9b, 0x24, 0x83, 0xd3, 0x74, 0xef, 0xe7, 0x3d, 0xd1, 0xe8, 0x9d, 0x50, + 0x93, 0xdf, 0x39, 0x1a, 0xaf, 0xe9, 0x0a, 0x06, 0x72, 0x1a, 0xb0, 0xfb, 0x9b, 0xc5, 0xcf, 0x4c, + 0x21, 0x01, 0xb8, 0x5f, 0x85, 0x04, 0xec, 0x2f, 0x96, 0xa8, 0x1d, 0xcf, 0xfb, 0xa5, 0x6a, 0xe2, + 0x3c, 0x09, 0x83, 0x4e, 0x37, 0xde, 0x08, 0x32, 0xb7, 0xfe, 0x4d, 0xb3, 0x56, 0x2c, 0xa0, 0x68, + 0x01, 0x06, 0x9a, 0xba, 0xce, 0xc9, 0x41, 0xbe, 0xa7, 0x76, 0x89, 0x3a, 0x31, 0xc1, 0x8c, 0x0a, + 0x7a, 0x14, 0x06, 0x62, 0xa7, 0x25, 0x53, 0xae, 0x58, 0x9a, 0xed, 0xaa, 0xd3, 0x8a, 0x30, 0x6b, + 0x35, 0xd5, 
0xf7, 0xc0, 0x3e, 0xea, 0xfb, 0x45, 0x18, 0x8d, 0xdc, 0x96, 0xef, 0xc4, 0xdd, 0x90, + 0x18, 0xc7, 0x7c, 0x3a, 0x72, 0xc3, 0x04, 0xe2, 0x24, 0xae, 0xfd, 0x1b, 0x23, 0x70, 0x66, 0x65, + 0x66, 0x51, 0x56, 0xef, 0x3e, 0xb6, 0xac, 0xa9, 0x3c, 0x1e, 0xf7, 0x2e, 0x6b, 0xaa, 0x07, 0x77, + 0xcf, 0xc8, 0x9a, 0xf2, 0x8c, 0xac, 0xa9, 0x64, 0x0a, 0x4b, 0xb9, 0x88, 0x14, 0x96, 0xbc, 0x1e, + 0xf4, 0x93, 0xc2, 0x72, 0x6c, 0x69, 0x54, 0x7b, 0x76, 0xe8, 0x40, 0x69, 0x54, 0x2a, 0xc7, 0xac, + 0x90, 0xe4, 0x82, 0x1e, 0x9f, 0x2a, 0x37, 0xc7, 0x4c, 0xe5, 0xf7, 0xf0, 0xc4, 0x19, 0x21, 0xea, + 0x5f, 0x29, 0xbe, 0x03, 0x7d, 0xe4, 0xf7, 0x88, 0xdc, 0x1d, 0x33, 0xa7, 0x6c, 0xa8, 0x88, 0x9c, + 0xb2, 0xbc, 0xee, 0xec, 0x9b, 0x53, 0xf6, 0x22, 0x8c, 0x36, 0xbc, 0xc0, 0x27, 0xcb, 0x61, 0x10, + 0x07, 0x8d, 0xc0, 0x13, 0x66, 0xbd, 0x12, 0x09, 0x33, 0x26, 0x10, 0x27, 0x71, 0x7b, 0x25, 0xa4, + 0xd5, 0x8e, 0x9a, 0x90, 0x06, 0xf7, 0x29, 0x21, 0xed, 0x67, 0x74, 0xea, 0xf4, 0x30, 0xfb, 0x22, + 0x1f, 0x2e, 0xfe, 0x8b, 0xf4, 0x93, 0x3f, 0x8d, 0xde, 0xe4, 0xd7, 0xee, 0x51, 0xc3, 0x78, 0x26, + 0x68, 0x53, 0xc3, 0x6f, 0x84, 0x0d, 0xc9, 0xab, 0xc7, 0x30, 0x61, 0x6f, 0xae, 0x68, 0x36, 0xea, + 0x2a, 0x3e, 0xdd, 0x84, 0x93, 0x1d, 0x39, 0x4a, 0x6a, 0xf7, 0x57, 0x4b, 0xf0, 0x03, 0xfb, 0x76, + 0x01, 0xdd, 0x06, 0x88, 0x9d, 0x96, 0x98, 0xa8, 0xe2, 0xc0, 0xe4, 0x88, 0xe1, 0x95, 0xab, 0x92, + 0x1e, 0xaf, 0x49, 0xa2, 0xfe, 0xb2, 0xa3, 0x08, 0xf9, 0x9b, 0x45, 0x55, 0x06, 0x5e, 0xa6, 0x74, + 0x23, 0x0e, 0x3c, 0x82, 0x19, 0x84, 0xaa, 0xff, 0x90, 0xb4, 0xf4, 0x3d, 0xd1, 0xea, 0xf3, 0x61, + 0xd6, 0x8a, 0x05, 0x14, 0x3d, 0x0f, 0xc3, 0x8e, 0xe7, 0xf1, 0xfc, 0x18, 0x12, 0x89, 0x7b, 0x77, + 0x74, 0x0d, 0x39, 0x0d, 0xc2, 0x26, 0x9e, 0xfd, 0xa7, 0x25, 0x98, 0xd8, 0x47, 0xa6, 0x64, 0x32, + 0xfe, 0x2a, 0x7d, 0x67, 0xfc, 0x89, 0x1c, 0x85, 0xc1, 0x1e, 0x39, 0x0a, 0xcf, 0xc3, 0x70, 0x4c, + 0x9c, 0xb6, 0x08, 0xc8, 0x12, 0x9e, 0x00, 0x7d, 0x02, 0xac, 0x41, 0xd8, 0xc4, 0xa3, 0x52, 0x6c, + 0xcc, 0x69, 0x34, 0x48, 0x14, 0xc9, 0x24, 0x04, 0xe1, 0x4d, 0x2d, 0x2c, 0xc3, 0x81, 0x39, 0xa9, + 0xa7, 0x13, 0x2c, 0x70, 0x8a, 0x65, 0x7a, 0xc0, 0x6b, 0x7d, 0x0e, 0xf8, 0x37, 0x4a, 0xf0, 0xd8, + 0x9e, 0xda, 0xad, 0xef, 0xfc, 0x90, 0x6e, 0x44, 0xc2, 0xf4, 0xc4, 0xb9, 0x1e, 0x91, 0x10, 0x33, + 0x08, 0x1f, 0xa5, 0x4e, 0xc7, 0xb8, 0x87, 0xbb, 0xe8, 0xe4, 0x25, 0x3e, 0x4a, 0x09, 0x16, 0x38, + 0xc5, 0xf2, 0xb0, 0xd3, 0xf2, 0xef, 0x95, 0xe0, 0x89, 0x3e, 0x6c, 0x80, 0x02, 0x93, 0xbc, 0x92, + 0xa9, 0x76, 0xe5, 0xfb, 0x94, 0x11, 0x79, 0xc8, 0xe1, 0xfa, 0x66, 0x09, 0xce, 0xf5, 0x56, 0xc5, + 0xe8, 0xbd, 0x70, 0x22, 0x54, 0x51, 0x58, 0x66, 0x96, 0xde, 0x69, 0xee, 0x49, 0x48, 0x80, 0x70, + 0x1a, 0x17, 0x4d, 0x02, 0x74, 0x9c, 0x78, 0x23, 0xba, 0xb8, 0xed, 0x46, 0xb1, 0xa8, 0x42, 0x33, + 0xc6, 0xcf, 0xae, 0x64, 0x2b, 0x36, 0x30, 0x28, 0x3b, 0xf6, 0x6f, 0x36, 0xb8, 0x16, 0xc4, 0xfc, + 0x21, 0xbe, 0x8d, 0x38, 0x2d, 0xef, 0xec, 0x30, 0x40, 0x38, 0x8d, 0x4b, 0xd9, 0xb1, 0xd3, 0x51, + 0xde, 0x51, 0xbe, 0xbf, 0x60, 0xec, 0x16, 0x54, 0x2b, 0x36, 0x30, 0xd2, 0xf9, 0x87, 0x95, 0xfd, + 0xf3, 0x0f, 0xed, 0x7f, 0x5c, 0x82, 0x47, 0x7a, 0x9a, 0x72, 0xfd, 0x2d, 0xc0, 0x07, 0x2f, 0x67, + 0xf0, 0x70, 0x73, 0xe7, 0x80, 0xb9, 0x6d, 0x7f, 0xd4, 0x63, 0xa6, 0x89, 0xdc, 0xb6, 0xc3, 0x27, + 0x87, 0x3f, 0x78, 0xe3, 0x99, 0x49, 0x67, 0x1b, 0x38, 0x40, 0x3a, 0x5b, 0xea, 0x63, 0x54, 0xfa, + 0x5c, 0xc8, 0x7f, 0x5e, 0xee, 0x39, 0xbc, 0x74, 0xeb, 0xd7, 0x97, 0x9f, 0x76, 0x16, 0x4e, 0xba, + 0x3e, 0xbb, 0xbf, 0x69, 0xa5, 0xbb, 0x26, 0x0a, 0x93, 0x94, 0x92, 0xb7, 0xac, 0xcf, 0xa7, 0xe0, + 0x38, 0xf3, 0xc4, 0x03, 0x98, 0x5e, 
0x78, 0xb8, 0x21, 0x3d, 0x58, 0x82, 0x2b, 0x5a, 0x82, 0xb3, + 0x72, 0x28, 0x36, 0x9c, 0x90, 0x34, 0x85, 0x1a, 0x89, 0x44, 0x42, 0xc5, 0x23, 0x3c, 0x29, 0x23, + 0x07, 0x01, 0xe7, 0x3f, 0xc7, 0xae, 0xcc, 0x09, 0x3a, 0x6e, 0x43, 0x6c, 0x72, 0xf4, 0x95, 0x39, + 0xb4, 0x11, 0x73, 0x98, 0xfd, 0x61, 0xa8, 0xa9, 0xf7, 0xe7, 0x61, 0xdd, 0x6a, 0xd2, 0x65, 0xc2, + 0xba, 0xd5, 0x8c, 0x33, 0xb0, 0xe8, 0xd7, 0xa2, 0x26, 0x71, 0x6a, 0xf5, 0x5c, 0x25, 0x3b, 0xcc, + 0x3e, 0xb6, 0xdf, 0x05, 0x23, 0xca, 0xcf, 0xd2, 0xef, 0x45, 0x42, 0xf6, 0x9f, 0x0c, 0xc2, 0x68, + 0xa2, 0x38, 0x60, 0xc2, 0xc1, 0x6a, 0xed, 0xeb, 0x60, 0x65, 0x61, 0xfa, 0x5d, 0x5f, 0xde, 0x32, + 0x66, 0x84, 0xe9, 0x77, 0x7d, 0x82, 0x39, 0x8c, 0x9a, 0xb7, 0xcd, 0x70, 0x07, 0x77, 0x7d, 0x11, + 0x4e, 0xab, 0xcc, 0xdb, 0x59, 0xd6, 0x8a, 0x05, 0x14, 0x7d, 0xc2, 0x82, 0x91, 0x88, 0x79, 0xef, + 0xb9, 0x7b, 0x5a, 0x4c, 0xba, 0x2b, 0x47, 0xaf, 0x7d, 0xa8, 0x0a, 0x61, 0xb2, 0x08, 0x19, 0xb3, + 0x05, 0x27, 0x38, 0xa2, 0x4f, 0x5b, 0x50, 0x53, 0x97, 0xa1, 0x88, 0x2b, 0x03, 0x57, 0x8a, 0xad, + 0xbd, 0xc8, 0xfd, 0x9a, 0xea, 0x20, 0x44, 0x15, 0xc1, 0xc3, 0x9a, 0x31, 0x8a, 0x94, 0xef, 0x78, + 0xe8, 0x78, 0x7c, 0xc7, 0x90, 0xe3, 0x37, 0x7e, 0x27, 0xd4, 0xda, 0x8e, 0xef, 0xae, 0x93, 0x28, + 0xe6, 0xee, 0x5c, 0x59, 0x12, 0x56, 0x36, 0x62, 0x0d, 0xa7, 0x0a, 0x39, 0x62, 0x2f, 0x16, 0x1b, + 0xfe, 0x57, 0xa6, 0x90, 0x57, 0x74, 0x33, 0x36, 0x71, 0x4c, 0x67, 0x31, 0xdc, 0x57, 0x67, 0xf1, + 0xf0, 0x3e, 0xce, 0xe2, 0x15, 0x38, 0xeb, 0x74, 0xe3, 0xe0, 0x32, 0x71, 0xbc, 0x69, 0x7e, 0xff, + 0xa7, 0xb8, 0x60, 0x7a, 0x84, 0x39, 0x20, 0xd4, 0x99, 0xfe, 0x0a, 0xf1, 0xd6, 0x33, 0x48, 0x38, + 0xff, 0x59, 0xfb, 0x1f, 0x58, 0x70, 0x36, 0x77, 0x2a, 0x3c, 0xb8, 0xd1, 0x94, 0xf6, 0x57, 0x2a, + 0x70, 0x3a, 0xa7, 0x74, 0x28, 0xda, 0x31, 0x17, 0x89, 0x55, 0x44, 0x60, 0x42, 0xf2, 0x9c, 0x5d, + 0x7e, 0x9b, 0x9c, 0x95, 0x71, 0xb0, 0xf3, 0x1f, 0x7d, 0x06, 0x53, 0xbe, 0xb7, 0x67, 0x30, 0xc6, + 0x5c, 0x1f, 0xb8, 0xaf, 0x73, 0xbd, 0xb2, 0xcf, 0x5c, 0xff, 0x55, 0x0b, 0xc6, 0xdb, 0x3d, 0xea, + 0xd5, 0x0b, 0x6f, 0xe6, 0x8d, 0xe3, 0xa9, 0x86, 0x5f, 0x7f, 0xf4, 0xce, 0xee, 0x44, 0xcf, 0x6b, + 0x02, 0x70, 0xcf, 0x5e, 0xd9, 0xdf, 0x29, 0x03, 0xab, 0x5b, 0xcb, 0xca, 0xc3, 0xed, 0xa0, 0x8f, + 0x9b, 0x15, 0x88, 0xad, 0xa2, 0xaa, 0xe5, 0x72, 0xe2, 0xaa, 0x82, 0x31, 0x1f, 0xc1, 0xbc, 0x82, + 0xc6, 0x69, 0x49, 0x58, 0xea, 0x43, 0x12, 0x7a, 0xb2, 0xd4, 0x73, 0xb9, 0xf8, 0x52, 0xcf, 0xb5, + 0x74, 0x99, 0xe7, 0xbd, 0x3f, 0xf1, 0xc0, 0x03, 0xf9, 0x89, 0x7f, 0xc1, 0xe2, 0x82, 0x27, 0xf5, + 0x15, 0xb4, 0xb9, 0x61, 0xed, 0x61, 0x6e, 0x3c, 0x0d, 0xd5, 0x48, 0x48, 0x66, 0x61, 0x96, 0xe8, + 0x43, 0x71, 0xd1, 0x8e, 0x15, 0x06, 0xbb, 0x0b, 0xd6, 0xf3, 0x82, 0xdb, 0x17, 0xdb, 0x9d, 0x78, + 0x47, 0x18, 0x28, 0xfa, 0x2e, 0x58, 0x05, 0xc1, 0x06, 0x96, 0xfd, 0x37, 0x4b, 0x7c, 0x06, 0x8a, + 0xc8, 0x8a, 0x17, 0x52, 0xb7, 0xf7, 0xf5, 0x1f, 0x94, 0xf0, 0x51, 0x80, 0x86, 0xba, 0x1f, 0x5f, + 0x1c, 0x34, 0x5d, 0x3e, 0xf2, 0xe5, 0xdd, 0x82, 0x9e, 0x7e, 0x0d, 0xdd, 0x86, 0x0d, 0x7e, 0x09, + 0x59, 0x5a, 0xde, 0x57, 0x96, 0x26, 0xc4, 0xca, 0xc0, 0xde, 0x62, 0xc5, 0xfe, 0x53, 0x0b, 0x12, + 0x66, 0x16, 0xea, 0x40, 0x85, 0x76, 0x77, 0xa7, 0x98, 0xab, 0xff, 0x4d, 0xd2, 0x54, 0x34, 0x8a, + 0x69, 0xcf, 0x7e, 0x62, 0xce, 0x08, 0x79, 0x22, 0x00, 0x83, 0x8f, 0xea, 0xb5, 0xe2, 0x18, 0x5e, + 0x0e, 0x82, 0x4d, 0x7e, 0x5a, 0xaa, 0x83, 0x39, 0xec, 0x17, 0xe0, 0x54, 0xa6, 0x53, 0xec, 0xa2, + 0xae, 0x80, 0x6a, 0x9f, 0xd4, 0x74, 0x65, 0x59, 0xa1, 0x98, 0xc3, 0xec, 0x6f, 0x5a, 0x70, 0x32, + 0x4d, 0x1e, 0xbd, 0x69, 0xc1, 0xa9, 0x28, 0x4d, 0xef, 0xb8, 
 }
 
 func (m *AWSAuthConfig) Marshal() (dAtA []byte, err error) {
@@ -5186,6 +5224,11 @@ func (m *AWSAuthConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	i -= len(m.Profile)
+	copy(dAtA[i:], m.Profile)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Profile)))
+	i--
+	dAtA[i] = 0x1a
 	i -= len(m.RoleARN)
 	copy(dAtA[i:], m.RoleARN)
 	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleARN)))
@@ -5880,6 +5923,15 @@ func (m *ApplicationSetApplicationStatus) MarshalToSizedBuffer(dAtA []byte) (int
 	_ = i
 	var l int
 	_ = l
+	if len(m.TargetRevisions) > 0 {
+		for iNdEx := len(m.TargetRevisions) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.TargetRevisions[iNdEx])
+			copy(dAtA[i:], m.TargetRevisions[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetRevisions[iNdEx])))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
 	i -= len(m.Step)
 	copy(dAtA[i:], m.Step)
 	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Step)))
@@ -6455,6 +6507,13 @@ func (m *ApplicationSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.TemplatePatch != nil {
+		i -= len(*m.TemplatePatch)
+		copy(dAtA[i:], *m.TemplatePatch)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TemplatePatch)))
+		i--
+		dAtA[i] = 0x52
+	}
 	if len(m.IgnoreApplicationDifferences) > 0 {
 		for iNdEx := len(m.IgnoreApplicationDifferences) - 1; iNdEx >= 0; iNdEx-- {
 			{
@@ -6577,6 +6636,20 @@ func (m *ApplicationSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if len(m.Resources) > 0 {
+		for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+ dAtA[i] = 0x1a + } + } if len(m.ApplicationStatus) > 0 { for iNdEx := len(m.ApplicationStatus) - 1; iNdEx >= 0; iNdEx-- { { @@ -6938,6 +7011,43 @@ func (m *ApplicationSetTerminalGenerator) MarshalToSizedBuffer(dAtA []byte) (int return len(dAtA) - i, nil } +func (m *ApplicationSetTree) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplicationSetTree) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ApplicationSetTree) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Nodes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *ApplicationSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7276,6 +7386,23 @@ func (m *ApplicationSourceKustomize) MarshalToSizedBuffer(dAtA []byte) (int, err _ = i var l int _ = l + i-- + if m.LabelWithoutSelector { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + if len(m.Components) > 0 { + for iNdEx := len(m.Components) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Components[iNdEx]) + copy(dAtA[i:], m.Components[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Components[iNdEx]))) + i-- + dAtA[i] = 0x6a + } + } if len(m.Patches) > 0 { for iNdEx := len(m.Patches) - 1; iNdEx >= 0; iNdEx-- { { @@ -12829,6 +12956,16 @@ func (m *RevisionHistory) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.InitiatedBy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 if len(m.Revisions) > 0 { for iNdEx := len(m.Revisions) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Revisions[iNdEx]) @@ -13682,6 +13819,9 @@ func (m *SyncOperation) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.SelfHealAttemptsCount)) + i-- + dAtA[i] = 0x60 if len(m.Revisions) > 0 { for iNdEx := len(m.Revisions) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Revisions[iNdEx]) @@ -14382,6 +14522,8 @@ func (m *AWSAuthConfig) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.RoleARN) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Profile) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -14652,6 +14794,12 @@ func (m *ApplicationSetApplicationStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Step) n += 1 + l + sovGenerated(uint64(l)) + if len(m.TargetRevisions) > 0 { + for _, s := range m.TargetRevisions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -14888,6 +15036,10 @@ func (m *ApplicationSetSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.TemplatePatch != nil { + l = len(*m.TemplatePatch) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -14909,6 +15061,12 @@ func (m *ApplicationSetStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -15030,6 +15188,21 @@ func (m 
*ApplicationSetTerminalGenerator) Size() (n int) { return n } +func (m *ApplicationSetTree) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ApplicationSource) Size() (n int) { if m == nil { return 0 @@ -15199,6 +15372,13 @@ func (m *ApplicationSourceKustomize) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.Components) > 0 { + for _, s := range m.Components { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 return n } @@ -17260,6 +17440,8 @@ func (m *RevisionHistory) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = m.InitiatedBy.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -17589,6 +17771,7 @@ func (m *SyncOperation) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + n += 1 + sovGenerated(uint64(m.SelfHealAttemptsCount)) return n } @@ -17828,6 +18011,7 @@ func (this *AWSAuthConfig) String() string { s := strings.Join([]string{`&AWSAuthConfig{`, `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, `RoleARN:` + fmt.Sprintf("%v", this.RoleARN) + `,`, + `Profile:` + fmt.Sprintf("%v", this.Profile) + `,`, `}`, }, "") return s @@ -18040,6 +18224,7 @@ func (this *ApplicationSetApplicationStatus) String() string { `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Step:` + fmt.Sprintf("%v", this.Step) + `,`, + `TargetRevisions:` + fmt.Sprintf("%v", this.TargetRevisions) + `,`, `}`, }, "") return s @@ -18179,6 +18364,7 @@ func (this *ApplicationSetSpec) String() string { `GoTemplateOptions:` + fmt.Sprintf("%v", this.GoTemplateOptions) + `,`, `ApplyNestedSelectors:` + fmt.Sprintf("%v", this.ApplyNestedSelectors) + `,`, `IgnoreApplicationDifferences:` + repeatedStringForIgnoreApplicationDifferences + `,`, + `TemplatePatch:` + valueToStringGenerated(this.TemplatePatch) + `,`, `}`, }, "") return s @@ -18197,9 +18383,15 @@ func (this *ApplicationSetStatus) String() string { repeatedStringForApplicationStatus += strings.Replace(strings.Replace(f.String(), "ApplicationSetApplicationStatus", "ApplicationSetApplicationStatus", 1), `&`, ``, 1) + "," } repeatedStringForApplicationStatus += "}" + repeatedStringForResources := "[]ResourceStatus{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceStatus", "ResourceStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" s := strings.Join([]string{`&ApplicationSetStatus{`, `Conditions:` + repeatedStringForConditions + `,`, `ApplicationStatus:` + repeatedStringForApplicationStatus + `,`, + `Resources:` + repeatedStringForResources + `,`, `}`, }, "") return s @@ -18288,6 +18480,21 @@ func (this *ApplicationSetTerminalGenerator) String() string { }, "") return s } +func (this *ApplicationSetTree) String() string { + if this == nil { + return "nil" + } + repeatedStringForNodes := "[]ResourceNode{" + for _, f := range this.Nodes { + repeatedStringForNodes += strings.Replace(strings.Replace(f.String(), "ResourceNode", "ResourceNode", 1), `&`, ``, 1) + "," + } + repeatedStringForNodes += "}" + s := strings.Join([]string{`&ApplicationSetTree{`, + `Nodes:` + repeatedStringForNodes + `,`, + `}`, + }, "") + return s +} func (this *ApplicationSource) String() string { if this == nil { return "nil" @@ -18417,6 +18624,8 @@ func (this *ApplicationSourceKustomize) String() string { 
`CommonAnnotationsEnvsubst:` + fmt.Sprintf("%v", this.CommonAnnotationsEnvsubst) + `,`, `Replicas:` + repeatedStringForReplicas + `,`, `Patches:` + repeatedStringForPatches + `,`, + `Components:` + fmt.Sprintf("%v", this.Components) + `,`, + `LabelWithoutSelector:` + fmt.Sprintf("%v", this.LabelWithoutSelector) + `,`, `}`, }, "") return s @@ -20013,6 +20222,7 @@ func (this *RevisionHistory) String() string { `DeployStartedAt:` + strings.Replace(fmt.Sprintf("%v", this.DeployStartedAt), "Time", "v1.Time", 1) + `,`, `Sources:` + repeatedStringForSources + `,`, `Revisions:` + fmt.Sprintf("%v", this.Revisions) + `,`, + `InitiatedBy:` + strings.Replace(strings.Replace(this.InitiatedBy.String(), "OperationInitiator", "OperationInitiator", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -20230,6 +20440,7 @@ func (this *SyncOperation) String() string { `SyncOptions:` + fmt.Sprintf("%v", this.SyncOptions) + `,`, `Sources:` + repeatedStringForSources + `,`, `Revisions:` + fmt.Sprintf("%v", this.Revisions) + `,`, + `SelfHealAttemptsCount:` + fmt.Sprintf("%v", this.SelfHealAttemptsCount) + `,`, `}`, }, "") return s @@ -20484,6 +20695,38 @@ func (m *AWSAuthConfig) Unmarshal(dAtA []byte) error { } m.RoleARN = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Profile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Profile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22624,6 +22867,38 @@ func (m *ApplicationSetApplicationStatus) Unmarshal(dAtA []byte) error { } m.Step = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRevisions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetRevisions = append(m.TargetRevisions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -24456,6 +24731,39 @@ func (m *ApplicationSetSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplatePatch", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.TemplatePatch = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -24574,6 +24882,40 @@ func (m *ApplicationSetStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, ResourceStatus{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -25670,6 +26012,90 @@ func (m *ApplicationSetTerminalGenerator) Unmarshal(dAtA []byte) error { } return nil } +func (m *ApplicationSetTree) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplicationSetTree: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplicationSetTree: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, ResourceNode{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ApplicationSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -27254,6 +27680,58 @@ func (m *ApplicationSourceKustomize) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Components", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Components = append(m.Components, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelWithoutSelector", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelWithoutSelector = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -45792,6 +46270,39 @@ func (m *RevisionHistory) Unmarshal(dAtA []byte) error { } m.Revisions = append(m.Revisions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatedBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.InitiatedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -48683,6 +49194,25 @@ func (m *SyncOperation) Unmarshal(dAtA []byte) error { } m.Revisions = append(m.Revisions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfHealAttemptsCount", wireType) + } + m.SelfHealAttemptsCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SelfHealAttemptsCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto index ec6363dbd0..3b68e8eb1f 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto @@ -22,6 +22,9 @@ message AWSAuthConfig { // RoleARN contains optional role ARN. If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain. optional string roleARN = 2; + + // Profile contains optional role ARN. If set then AWS IAM Authenticator uses the profile to perform cluster operations instead of the default AWS credential provider chain. 
+ optional string profile = 3; } // AppProject provides a logical grouping of applications, providing controls for: @@ -106,6 +109,7 @@ message AppProjectStatus { // +kubebuilder:printcolumn:name="Sync Status",type=string,JSONPath=`.status.sync.status` // +kubebuilder:printcolumn:name="Health Status",type=string,JSONPath=`.status.health.status` // +kubebuilder:printcolumn:name="Revision",type=string,JSONPath=`.status.sync.revision`,priority=10 +// +kubebuilder:printcolumn:name="Project",type=string,JSONPath=`.spec.project`,priority=10 message Application { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -193,9 +197,12 @@ message ApplicationSetApplicationStatus { // Step tracks which step this Application should be updated in optional string step = 5; + + // TargetRevision tracks the desired revisions the Application should be synced to. + repeated string targetrevisions = 6; } -// ApplicationSetCondition contains details about an applicationset condition, which is usally an error or warning +// ApplicationSetCondition contains details about an applicationset condition, which is usually an error or warning message ApplicationSetCondition { // Type is an applicationset condition type optional string type = 1; @@ -316,6 +323,8 @@ message ApplicationSetSpec { optional bool applyNestedSelectors = 8; repeated ApplicationSetResourceIgnoreDifferences ignoreApplicationDifferences = 9; + + optional string templatePatch = 10; } // ApplicationSetStatus defines the observed state of ApplicationSet @@ -325,6 +334,9 @@ message ApplicationSetStatus { repeated ApplicationSetCondition conditions = 1; repeated ApplicationSetApplicationStatus applicationStatus = 2; + + // Resources is a list of Applications resources managed by this application set. + repeated ResourceStatus resources = 3; } // ApplicationSetStrategy configures how generated Applications are updated in sequence. @@ -390,6 +402,13 @@ message ApplicationSetTerminalGenerator { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 8; } +// ApplicationSetTree holds nodes which belongs to the application +// Used to build a tree of an ApplicationSet and its children +message ApplicationSetTree { + // Nodes contains list of nodes which are directly managed by the applicationset + repeated ResourceNode nodes = 1; +} + // ApplicationSource contains all required information about the source of an application message ApplicationSource { // RepoURL is the URL to the repository (Git or Helm) that contains the application manifests @@ -521,6 +540,12 @@ message ApplicationSourceKustomize { // Patches is a list of Kustomize patches repeated KustomizePatch patches = 12; + + // Components specifies a list of kustomize components to add to the kustomization before building + repeated string components = 13; + + // LabelWithoutSelector specifies whether to apply common labels to resource selectors or not + optional bool labelWithoutSelector = 14; } // ApplicationSourcePlugin holds options specific to config management plugins @@ -704,11 +729,11 @@ message Cluster { // Config holds cluster information for connecting to a cluster optional ClusterConfig config = 3; - // DEPRECATED: use Info.ConnectionState field instead. + // Deprecated: use Info.ConnectionState field instead. // ConnectionState contains information about cluster connection state optional ConnectionState connectionState = 4; - // DEPRECATED: use Info.ServerVersion field instead. + // Deprecated: use Info.ServerVersion field instead. 
// The server version optional string serverVersion = 5; @@ -1488,7 +1513,7 @@ message RefTarget { // RepoCreds holds the definition for repository credentials message RepoCreds { - // URL is the URL that this credentials matches to + // URL is the URL to which these credentials match optional string url = 1; // Username for authenticating at the repo server @@ -1601,7 +1626,7 @@ message Repository { // Proxy specifies the HTTP/HTTPS proxy used to access the repo optional string proxy = 19; - // Reference between project and repository that allow you automatically to be added as item inside SourceRepos project entity + // Reference between project and repository that allows it to be automatically added as an item inside SourceRepos project entity optional string project = 20; // GCPServiceAccountKey specifies the service account key in JSON format to be used for getting credentials to Google Cloud Source repos @@ -1894,6 +1919,9 @@ message RevisionHistory { // Revisions holds the revision of each source in sources field the sync was performed against repeated string revisions = 9; + + // InitiatedBy contains information about who initiated the operations + optional OperationInitiator initiatedBy = 10; } // RevisionMetadata contains metadata for a specific revision in a Git repository @@ -2146,6 +2174,9 @@ message SyncOperation { // Revisions is the list of revision (Git) or chart version (Helm) which to sync each source in sources field for the application to // If omitted, will use the revision specified in app spec. repeated string revisions = 11; + + // SelfHealAttemptsCount contains the number of auto-heal attempts + optional int64 autoHealAttemptsCount = 12; } // SyncOperationResource contains resources to sync. @@ -2213,7 +2244,6 @@ message SyncStatus { optional string status = 1; // ComparedTo contains information about what has been compared - // +patchStrategy=replace optional ComparedTo comparedTo = 2; // Revision contains information about the revision the comparison has been performed to diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go index faaec52bbb..8d51447d2b 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go @@ -41,6 +41,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate": schema_pkg_apis_application_v1alpha1_ApplicationSetTemplate(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplateMeta": schema_pkg_apis_application_v1alpha1_ApplicationSetTemplateMeta(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTerminalGenerator": schema_pkg_apis_application_v1alpha1_ApplicationSetTerminalGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTree": schema_pkg_apis_application_v1alpha1_ApplicationSetTree(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSource": schema_pkg_apis_application_v1alpha1_ApplicationSource(ref), 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourceDirectory": schema_pkg_apis_application_v1alpha1_ApplicationSourceDirectory(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourceHelm": schema_pkg_apis_application_v1alpha1_ApplicationSourceHelm(ref), @@ -192,6 +193,13 @@ func schema_pkg_apis_application_v1alpha1_AWSAuthConfig(ref common.ReferenceCall Format: "", }, }, + "profile": { + SchemaProps: spec.SchemaProps{ + Description: "Profile contains optional role ARN. If set then AWS IAM Authenticator uses the profile to perform cluster operations instead of the default AWS credential provider chain.", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ -846,8 +854,23 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSetApplicationStatus(ref co Format: "", }, }, + "targetRevisions": { + SchemaProps: spec.SchemaProps{ + Description: "TargetRevision tracks the desired revisions the Application should be synced to.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, - Required: []string{"application", "message", "status", "step"}, + Required: []string{"application", "message", "status", "step", "targetRevisions"}, }, }, Dependencies: []string{ @@ -859,7 +882,7 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSetCondition(ref common.Ref return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ApplicationSetCondition contains details about an applicationset condition, which is usally an error or warning", + Description: "ApplicationSetCondition contains details about an applicationset condition, which is usually an error or warning", Type: []string{"object"}, Properties: map[string]spec.Schema{ "type": { @@ -1282,6 +1305,12 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSetSpec(ref common.Referenc }, }, }, + "templatePatch": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"generators", "template"}, }, @@ -1325,11 +1354,25 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSetStatus(ref common.Refere }, }, }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Resources is a list of Applications resources managed by this application set.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceStatus"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetApplicationStatus", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetCondition"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetApplicationStatus", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetCondition", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceStatus"}, } } @@ -1539,6 +1582,35 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSetTerminalGenerator(ref co } } +func schema_pkg_apis_application_v1alpha1_ApplicationSetTree(ref common.ReferenceCallback) common.OpenAPIDefinition 
{ + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ApplicationSetTree holds nodes which belongs to the application Used to build a tree of an ApplicationSet and its children", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nodes": { + SchemaProps: spec.SchemaProps{ + Description: "Nodes contains list of nodes which are directly managed by the applicationset", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceNode"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceNode"}, + } +} + func schema_pkg_apis_application_v1alpha1_ApplicationSource(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1958,6 +2030,28 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSourceKustomize(ref common. }, }, }, + "components": { + SchemaProps: spec.SchemaProps{ + Description: "Components specifies a list of kustomize components to add to the kustomization before building", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "labelWithoutSelector": { + SchemaProps: spec.SchemaProps{ + Description: "LabelWithoutSelector specifies whether to apply common labels to resource selectors or not", + Type: []string{"boolean"}, + Format: "", + }, + }, }, }, }, @@ -2551,14 +2645,14 @@ func schema_pkg_apis_application_v1alpha1_Cluster(ref common.ReferenceCallback) }, "connectionState": { SchemaProps: spec.SchemaProps{ - Description: "DEPRECATED: use Info.ConnectionState field instead. ConnectionState contains information about cluster connection state", + Description: "Deprecated: use Info.ConnectionState field instead. ConnectionState contains information about cluster connection state", Default: map[string]interface{}{}, Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ConnectionState"), }, }, "serverVersion": { SchemaProps: spec.SchemaProps{ - Description: "DEPRECATED: use Info.ServerVersion field instead. The server version", + Description: "Deprecated: use Info.ServerVersion field instead. 
The server version", Type: []string{"string"}, Format: "", }, @@ -4002,7 +4096,6 @@ func schema_pkg_apis_application_v1alpha1_KustomizeReplica(ref common.ReferenceC "count": { SchemaProps: spec.SchemaProps{ Description: "Number of replicas", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, @@ -4124,8 +4217,7 @@ func schema_pkg_apis_application_v1alpha1_ListGenerator(ref common.ReferenceCall Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), }, }, }, @@ -4463,7 +4555,6 @@ func schema_pkg_apis_application_v1alpha1_OperationState(ref common.ReferenceCal "startedAt": { SchemaProps: spec.SchemaProps{ Description: "StartedAt contains time of operation start", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -4762,8 +4853,7 @@ func schema_pkg_apis_application_v1alpha1_PluginInput(ref common.ReferenceCallba Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), }, }, }, @@ -5346,7 +5436,7 @@ func schema_pkg_apis_application_v1alpha1_RepoCreds(ref common.ReferenceCallback Properties: map[string]spec.Schema{ "url": { SchemaProps: spec.SchemaProps{ - Description: "URL is the URL that this credentials matches to", + Description: "URL is the URL to which these credentials match", Default: "", Type: []string{"string"}, Format: "", @@ -5635,7 +5725,7 @@ func schema_pkg_apis_application_v1alpha1_Repository(ref common.ReferenceCallbac }, "project": { SchemaProps: spec.SchemaProps{ - Description: "Reference between project and repository that allow you automatically to be added as item inside SourceRepos project entity", + Description: "Reference between project and repository that allows it to be automatically added as an item inside SourceRepos project entity", Type: []string{"string"}, Format: "", }, @@ -6622,7 +6712,6 @@ func schema_pkg_apis_application_v1alpha1_RevisionHistory(ref common.ReferenceCa "deployedAt": { SchemaProps: spec.SchemaProps{ Description: "DeployedAt holds the time the sync operation completed", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -6676,12 +6765,19 @@ func schema_pkg_apis_application_v1alpha1_RevisionHistory(ref common.ReferenceCa }, }, }, + "initiatedBy": { + SchemaProps: spec.SchemaProps{ + Description: "InitiatedBy contains information about who initiated the operations", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OperationInitiator"), + }, + }, }, Required: []string{"deployedAt", "id"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSource", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSource", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OperationInitiator", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } @@ -6702,7 +6798,6 @@ func schema_pkg_apis_application_v1alpha1_RevisionMetadata(ref common.ReferenceC 
"date": { SchemaProps: spec.SchemaProps{ Description: "Date specifies when the revision was authored", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -7663,11 +7758,6 @@ func schema_pkg_apis_application_v1alpha1_SyncStatus(ref common.ReferenceCallbac }, }, "comparedTo": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-strategy": "replace", - }, - }, SchemaProps: spec.SchemaProps{ Description: "ComparedTo contains information about what has been compared", Default: map[string]interface{}{}, diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/repository_types.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/repository_types.go index 31e8c47971..4cdfe3f9f8 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/repository_types.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/repository_types.go @@ -3,6 +3,7 @@ package v1alpha1 import ( "fmt" "net/url" + "strings" "github.com/argoproj/argo-cd/v2/util/cert" "github.com/argoproj/argo-cd/v2/util/git" @@ -14,7 +15,7 @@ import ( // RepoCreds holds the definition for repository credentials type RepoCreds struct { - // URL is the URL that this credentials matches to + // URL is the URL to which these credentials match URL string `json:"url" protobuf:"bytes,1,opt,name=url"` // Username for authenticating at the repo server Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"` @@ -87,7 +88,7 @@ type Repository struct { GitHubAppEnterpriseBaseURL string `json:"githubAppEnterpriseBaseUrl,omitempty" protobuf:"bytes,18,opt,name=githubAppEnterpriseBaseUrl"` // Proxy specifies the HTTP/HTTPS proxy used to access the repo Proxy string `json:"proxy,omitempty" protobuf:"bytes,19,opt,name=proxy"` - // Reference between project and repository that allow you automatically to be added as item inside SourceRepos project entity + // Reference between project and repository that allows it to be automatically added as an item inside SourceRepos project entity Project string `json:"project,omitempty" protobuf:"bytes,20,opt,name=project"` // GCPServiceAccountKey specifies the service account key in JSON format to be used for getting credentials to Google Cloud Source repos GCPServiceAccountKey string `json:"gcpServiceAccountKey,omitempty" protobuf:"bytes,21,opt,name=gcpServiceAccountKey"` @@ -196,7 +197,7 @@ func (repo *Repository) GetGitCreds(store git.CredsStore) git.Creds { return git.NewHTTPSCreds(repo.Username, repo.Password, repo.TLSClientCertData, repo.TLSClientCertKey, repo.IsInsecure(), repo.Proxy, store, repo.ForceHttpBasicAuth) } if repo.SSHPrivateKey != "" { - return git.NewSSHCreds(repo.SSHPrivateKey, getCAPath(repo.Repo), repo.IsInsecure(), store) + return git.NewSSHCreds(repo.SSHPrivateKey, getCAPath(repo.Repo), repo.IsInsecure(), store, repo.Proxy) } if repo.GithubAppPrivateKey != "" && repo.GithubAppId != 0 && repo.GithubAppInstallationId != 0 { return git.NewGitHubAppCreds(repo.GithubAppId, repo.GithubAppInstallationId, repo.GithubAppPrivateKey, repo.GitHubAppEnterpriseBaseURL, repo.Repo, repo.TLSClientCertData, repo.TLSClientCertKey, repo.IsInsecure(), repo.Proxy, store) @@ -227,21 +228,22 @@ func getCAPath(repoURL string) string { } hostname := "" - // url.Parse() will happily parse most things thrown at it. 
When the URL - // is either https or oci, we use the parsed hostname to retrieve the cert, - // otherwise we'll use the parsed path (OCI repos are often specified as - // hostname, without protocol). - parsedURL, err := url.Parse(repoURL) + var parsedURL *url.URL + var err error + // Without schema in url, url.Parse() treats the url as differently + // and may incorrectly parses the hostname if url contains a path or port. + // To ensure proper parsing, prepend a dummy schema. + if !strings.Contains(repoURL, "://") { + parsedURL, err = url.Parse("protocol://" + repoURL) + } else { + parsedURL, err = url.Parse(repoURL) + } if err != nil { log.Warnf("Could not parse repo URL '%s': %v", repoURL, err) return "" } - if parsedURL.Scheme == "https" || parsedURL.Scheme == "oci" { - hostname = parsedURL.Host - } else if parsedURL.Scheme == "" { - hostname = parsedURL.Path - } + hostname = parsedURL.Hostname() if hostname == "" { log.Warnf("Could not get hostname for repository '%s'", repoURL) return "" diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go index 614cca979a..a5d52f3d3f 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go @@ -35,11 +35,11 @@ import ( "k8s.io/client-go/tools/clientcmd/api" "sigs.k8s.io/yaml" - "github.com/argoproj/argo-cd/v2/util/env" - "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/util/collections" + "github.com/argoproj/argo-cd/v2/util/env" "github.com/argoproj/argo-cd/v2/util/helm" + utilhttp "github.com/argoproj/argo-cd/v2/util/http" "github.com/argoproj/argo-cd/v2/util/security" ) @@ -51,6 +51,7 @@ import ( // +kubebuilder:printcolumn:name="Sync Status",type=string,JSONPath=`.status.sync.status` // +kubebuilder:printcolumn:name="Health Status",type=string,JSONPath=`.status.health.status` // +kubebuilder:printcolumn:name="Revision",type=string,JSONPath=`.status.sync.revision`,priority=10 +// +kubebuilder:printcolumn:name="Project",type=string,JSONPath=`.spec.project`,priority=10 type Application struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` @@ -205,6 +206,11 @@ func (s ApplicationSources) Equals(other ApplicationSources) bool { return true } +// IsZero returns true if the application source is considered empty +func (a ApplicationSources) IsZero() bool { + return len(a) == 0 +} + func (a *ApplicationSpec) GetSource() ApplicationSource { // if Application has multiple sources, return the first source in sources if a.HasMultipleSources() { @@ -230,9 +236,17 @@ func (a *ApplicationSpec) HasMultipleSources() bool { return a.Sources != nil && len(a.Sources) > 0 } -func (a *ApplicationSpec) GetSourcePtr() *ApplicationSource { +func (a *ApplicationSpec) GetSourcePtrByPosition(sourcePosition int) *ApplicationSource { + // if Application has multiple sources, return the first source in sources + return a.GetSourcePtrByIndex(sourcePosition - 1) +} + +func (a *ApplicationSpec) GetSourcePtrByIndex(sourceIndex int) *ApplicationSource { // if Application has multiple sources, return the first source in sources if a.HasMultipleSources() { + if sourceIndex > 0 { + return 
&a.Sources[sourceIndex] + } return &a.Sources[0] } return a.Source @@ -248,6 +262,11 @@ func (a *ApplicationSource) AllowsConcurrentProcessing() bool { return true } +// IsRef returns true when the application source is of type Ref +func (a *ApplicationSource) IsRef() bool { + return a.Ref != "" +} + // IsHelm returns true when the application source is of type Helm func (a *ApplicationSource) IsHelm() bool { return a.Chart != "" @@ -467,6 +486,10 @@ type ApplicationSourceKustomize struct { Replicas KustomizeReplicas `json:"replicas,omitempty" protobuf:"bytes,11,opt,name=replicas"` // Patches is a list of Kustomize patches Patches KustomizePatches `json:"patches,omitempty" protobuf:"bytes,12,opt,name=patches"` + // Components specifies a list of kustomize components to add to the kustomization before building + Components []string `json:"components,omitempty" protobuf:"bytes,13,rep,name=components"` + // LabelWithoutSelector specifies whether to apply common labels to resource selectors or not + LabelWithoutSelector bool `json:"labelWithoutSelector,omitempty" protobuf:"bytes,14,opt,name=labelWithoutSelector"` } type KustomizeReplica struct { @@ -556,7 +579,8 @@ func (k *ApplicationSourceKustomize) AllowsConcurrentProcessing() bool { k.NamePrefix == "" && k.Namespace == "" && k.NameSuffix == "" && - len(k.Patches) == 0 + len(k.Patches) == 0 && + len(k.Components) == 0 } // IsZero returns true when the Kustomize options are considered empty @@ -570,7 +594,8 @@ func (k *ApplicationSourceKustomize) IsZero() bool { len(k.Replicas) == 0 && len(k.CommonLabels) == 0 && len(k.CommonAnnotations) == 0 && - len(k.Patches) == 0 + len(k.Patches) == 0 && + len(k.Components) == 0 } // MergeImage merges a new Kustomize image identifier in to a list of images @@ -913,6 +938,12 @@ type ApplicationDestination struct { isServerInferred bool `json:"-"` } +// SetIsServerInferred sets the isServerInferred flag. This is used to allow comparison between two destinations where +// one server is inferred and the other is not. +func (d *ApplicationDestination) SetIsServerInferred(inferred bool) { + d.isServerInferred = inferred +} + type ResourceHealthLocation string var ( @@ -967,15 +998,15 @@ func (a *ApplicationStatus) GetRevisions() []string { // BuildComparedToStatus will build a ComparedTo object based on the current // Application state. -func (app *Application) BuildComparedToStatus() ComparedTo { +func (spec *ApplicationSpec) BuildComparedToStatus() ComparedTo { ct := ComparedTo{ - Destination: app.Spec.Destination, - IgnoreDifferences: app.Spec.IgnoreDifferences, + Destination: spec.Destination, + IgnoreDifferences: spec.IgnoreDifferences, } - if app.Spec.HasMultipleSources() { - ct.Sources = app.Spec.Sources + if spec.HasMultipleSources() { + ct.Sources = spec.Sources } else { - ct.Source = app.Spec.GetSource() + ct.Source = spec.GetSource() } return ct } @@ -1085,6 +1116,8 @@ type SyncOperation struct { // Revisions is the list of revision (Git) or chart version (Helm) which to sync each source in sources field for the application to // If omitted, will use the revision specified in app spec. 
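For context on the getCAPath rewrite in repository_types.go above: Go's url.Parse only populates Host when the input carries a scheme, so a schemeless repo URL such as example.com:8443/repo used to yield an empty hostname. A minimal, self-contained sketch of the pitfall and the dummy-scheme workaround; the helper name hostnameOf is illustrative, not Argo CD's:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// hostnameOf mirrors the patched getCAPath logic: prepend a dummy scheme
// when the repo URL has none so that url.Parse fills in the Host part.
func hostnameOf(repoURL string) string {
	if !strings.Contains(repoURL, "://") {
		repoURL = "protocol://" + repoURL
	}
	u, err := url.Parse(repoURL)
	if err != nil {
		return ""
	}
	return u.Hostname()
}

func main() {
	// Parsed naively, "example.com:8443/repo" is read as scheme "example.com"
	// with opaque data "8443/repo", so Hostname() comes back empty.
	u, _ := url.Parse("example.com:8443/repo")
	fmt.Printf("naive: %q\n", u.Hostname()) // naive: ""

	fmt.Println(hostnameOf("example.com:8443/repo"))        // example.com
	fmt.Println(hostnameOf("https://example.com/repo.git")) // example.com
}
```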
Revisions []string `json:"revisions,omitempty" protobuf:"bytes,11,opt,name=revisions"` + // SelfHealAttemptsCount contains the number of auto-heal attempts + SelfHealAttemptsCount int64 `json:"autoHealAttemptsCount,omitempty" protobuf:"bytes,12,opt,name=autoHealAttemptsCount"` } // IsApplyStrategy returns true if the sync strategy is "apply" @@ -1213,7 +1246,6 @@ func (r *RetryStrategy) NextRetryAt(lastAttempt time.Time, retryCounts int64) (t if r.Backoff.Factor != nil { factor = *r.Backoff.Factor } - } // Formula: timeToWait = duration * factor^retry_number // Note that timeToWait should equal to duration for the first retry attempt. @@ -1397,6 +1429,8 @@ type RevisionHistory struct { Sources ApplicationSources `json:"sources,omitempty" protobuf:"bytes,8,opt,name=sources"` // Revisions holds the revision of each source in sources field the sync was performed against Revisions []string `json:"revisions,omitempty" protobuf:"bytes,9,opt,name=revisions"` + // InitiatedBy contains information about who initiated the operations + InitiatedBy OperationInitiator `json:"initiatedBy,omitempty" protobuf:"bytes,10,opt,name=initiatedBy"` } // ApplicationWatchEvent contains information about application change. @@ -1493,8 +1527,7 @@ type SyncStatus struct { // Status is the sync state of the comparison Status SyncStatusCode `json:"status" protobuf:"bytes,1,opt,name=status,casttype=SyncStatusCode"` // ComparedTo contains information about what has been compared - // +patchStrategy=replace - ComparedTo ComparedTo `json:"comparedTo,omitempty" protobuf:"bytes,2,opt,name=comparedTo" patchStrategy:"replace"` + ComparedTo ComparedTo `json:"comparedTo,omitempty" protobuf:"bytes,2,opt,name=comparedTo"` // Revision contains information about the revision the comparison has been performed to Revision string `json:"revision,omitempty" protobuf:"bytes,3,opt,name=revision"` // Revisions contains information about the revisions of multiple sources the comparison has been performed to @@ -1678,7 +1711,7 @@ type ResourceStatus struct { SyncWave int64 `json:"syncWave,omitempty" protobuf:"bytes,10,opt,name=syncWave"` } -// GroupKindVersion returns the GVK schema type for given resource status +// GroupVersionKind returns the GVK schema type for given resource status func (r *ResourceStatus) GroupVersionKind() schema.GroupVersionKind { return schema.GroupVersionKind{Group: r.Group, Version: r.Version, Kind: r.Kind} } @@ -1744,10 +1777,10 @@ type Cluster struct { Name string `json:"name" protobuf:"bytes,2,opt,name=name"` // Config holds cluster information for connecting to a cluster Config ClusterConfig `json:"config" protobuf:"bytes,3,opt,name=config"` - // DEPRECATED: use Info.ConnectionState field instead. + // Deprecated: use Info.ConnectionState field instead. // ConnectionState contains information about cluster connection state ConnectionState ConnectionState `json:"connectionState,omitempty" protobuf:"bytes,4,opt,name=connectionState"` - // DEPRECATED: use Info.ServerVersion field instead. + // Deprecated: use Info.ServerVersion field instead. // The server version ServerVersion string `json:"serverVersion,omitempty" protobuf:"bytes,5,opt,name=serverVersion"` // Holds list of namespaces which are accessible in that cluster. Cluster level resources will be ignored if namespace list is not empty. @@ -1851,6 +1884,9 @@ type AWSAuthConfig struct { // RoleARN contains optional role ARN. If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain. 
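The NextRetryAt cleanup above sits next to the documented backoff formula, timeToWait = duration * factor^retry_number. A quick worked illustration of the growth it produces; the 5s base, factor 2, and 3m cap are example values chosen for the sketch, not values this patch sets:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// timeToWait = duration * factor^retryNumber, so the first retry
	// (retry 0) waits exactly the base duration.
	base := 5 * time.Second
	factor := 2.0
	maxDuration := 3 * time.Minute // RetryStrategy's backoff also supports a cap

	for retry := 0; retry < 8; retry++ {
		wait := time.Duration(float64(base) * math.Pow(factor, float64(retry)))
		if wait > maxDuration {
			wait = maxDuration
		}
		fmt.Printf("retry %d: wait %v\n", retry, wait)
	}
	// Output: 5s, 10s, 20s, 40s, 1m20s, 2m40s, then capped at 3m0s
}
```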
RoleARN string `json:"roleARN,omitempty" protobuf:"bytes,2,opt,name=roleARN"` + + // Profile contains optional role ARN. If set then AWS IAM Authenticator uses the profile to perform cluster operations instead of the default AWS credential provider chain. + Profile string `json:"profile,omitempty" protobuf:"bytes,3,opt,name=profile"` } // ExecProviderConfig is config used to call an external command to perform cluster authentication @@ -2044,6 +2080,8 @@ var validActions = map[string]bool{ var validActionPatterns = []*regexp.Regexp{ regexp.MustCompile("action/.*"), + regexp.MustCompile("update/.*"), + regexp.MustCompile("delete/.*"), } func isValidAction(action string) bool { @@ -2071,6 +2109,12 @@ func isValidResource(resource string) bool { return validResources[resource] } +func isValidObject(proj string, object string) bool { + // match against [/]/ + objectRegexp, err := regexp.Compile(fmt.Sprintf(`^%s(/[*\w-.]+)?/[*\w-.]+$`, regexp.QuoteMeta(proj))) + return objectRegexp.MatchString(object) && err == nil +} + func validatePolicy(proj string, role string, policy string) error { policyComponents := strings.Split(policy, ",") if len(policyComponents) != 6 || strings.Trim(policyComponents[0], " ") != "p" { @@ -2094,9 +2138,8 @@ func validatePolicy(proj string, role string, policy string) error { } // object object := strings.Trim(policyComponents[4], " ") - objectRegexp, err := regexp.Compile(fmt.Sprintf(`^%s/[*\w-.]+$`, regexp.QuoteMeta(proj))) - if err != nil || !objectRegexp.MatchString(object) { - return status.Errorf(codes.InvalidArgument, "invalid policy rule '%s': object must be of form '%s/*' or '%s/', not '%s'", policy, proj, proj, object) + if !isValidObject(proj, object) { + return status.Errorf(codes.InvalidArgument, "invalid policy rule '%s': object must be of form '%s/*', '%s[/]/' or '%s/', not '%s'", policy, proj, proj, proj, object) } // effect effect := strings.Trim(policyComponents[5], " ") @@ -2226,12 +2269,11 @@ func (s *SyncWindows) HasWindows() bool { } // Active returns a list of sync windows that are currently active -func (s *SyncWindows) Active() *SyncWindows { +func (s *SyncWindows) Active() (*SyncWindows, error) { return s.active(time.Now()) } -func (s *SyncWindows) active(currentTime time.Time) *SyncWindows { - +func (s *SyncWindows) active(currentTime time.Time) (*SyncWindows, error) { // If SyncWindows.Active() is called outside of a UTC locale, it should be // first converted to UTC before we scan through the SyncWindows. 
currentTime = currentTime.In(time.UTC) @@ -2240,8 +2282,14 @@ func (s *SyncWindows) active(currentTime time.Time) *SyncWindows { var active SyncWindows specParser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) for _, w := range *s { - schedule, _ := specParser.Parse(w.Schedule) - duration, _ := time.ParseDuration(w.Duration) + schedule, sErr := specParser.Parse(w.Schedule) + if sErr != nil { + return nil, fmt.Errorf("cannot parse schedule '%s': %w", w.Schedule, sErr) + } + duration, dErr := time.ParseDuration(w.Duration) + if dErr != nil { + return nil, fmt.Errorf("cannot parse duration '%s': %w", w.Duration, dErr) + } // Offset the nextWindow time to consider the timeZone of the sync window timeZoneOffsetDuration := w.scheduleOffsetByTimeZone() @@ -2251,21 +2299,20 @@ func (s *SyncWindows) active(currentTime time.Time) *SyncWindows { } } if len(active) > 0 { - return &active + return &active, nil } } - return nil + return nil, nil } // InactiveAllows will iterate over the SyncWindows and return all inactive allow windows // for the current time. If the current time is in an inactive allow window, syncs will // be denied. -func (s *SyncWindows) InactiveAllows() *SyncWindows { +func (s *SyncWindows) InactiveAllows() (*SyncWindows, error) { return s.inactiveAllows(time.Now()) } -func (s *SyncWindows) inactiveAllows(currentTime time.Time) *SyncWindows { - +func (s *SyncWindows) inactiveAllows(currentTime time.Time) (*SyncWindows, error) { // If SyncWindows.InactiveAllows() is called outside of a UTC locale, it should be // first converted to UTC before we scan through the SyncWindows. currentTime = currentTime.In(time.UTC) @@ -2276,21 +2323,27 @@ func (s *SyncWindows) inactiveAllows(currentTime time.Time) *SyncWindows { for _, w := range *s { if w.Kind == "allow" { schedule, sErr := specParser.Parse(w.Schedule) + if sErr != nil { + return nil, fmt.Errorf("cannot parse schedule '%s': %w", w.Schedule, sErr) + } duration, dErr := time.ParseDuration(w.Duration) + if dErr != nil { + return nil, fmt.Errorf("cannot parse duration '%s': %w", w.Duration, dErr) + } // Offset the nextWindow time to consider the timeZone of the sync window timeZoneOffsetDuration := w.scheduleOffsetByTimeZone() nextWindow := schedule.Next(currentTime.Add(timeZoneOffsetDuration - duration)) - if !nextWindow.Before(currentTime.Add(timeZoneOffsetDuration)) && sErr == nil && dErr == nil { + if !nextWindow.Before(currentTime.Add(timeZoneOffsetDuration)) { inactive = append(inactive, w) } } } if len(inactive) > 0 { - return &inactive + return &inactive, nil } } - return nil + return nil, nil } func (w *SyncWindow) scheduleOffsetByTimeZone() time.Duration { @@ -2307,7 +2360,6 @@ func (w *SyncWindow) scheduleOffsetByTimeZone() time.Duration { func (s *AppProjectSpec) AddWindow(knd string, sch string, dur string, app []string, ns []string, cl []string, ms bool, timeZone string) error { if len(knd) == 0 || len(sch) == 0 || len(dur) == 0 { return fmt.Errorf("cannot create window: require kind, schedule, duration and one or more of applications, namespaces and clusters") - } window := &SyncWindow{ @@ -2336,7 +2388,6 @@ func (s *AppProjectSpec) AddWindow(knd string, sch string, dur string, app []str s.SyncWindows = append(s.SyncWindows, window) return nil - } // DeleteWindow deletes a sync window with the given id from the AppProject @@ -2396,36 +2447,42 @@ func (w *SyncWindows) Matches(app *Application) *SyncWindows { } // CanSync returns true if a sync window currently allows a sync. 
isManual indicates whether the sync has been triggered manually. -func (w *SyncWindows) CanSync(isManual bool) bool { +func (w *SyncWindows) CanSync(isManual bool) (bool, error) { if !w.HasWindows() { - return true + return true, nil } - active := w.Active() + active, err := w.Active() + if err != nil { + return false, fmt.Errorf("invalid sync windows: %w", err) + } hasActiveDeny, manualEnabled := active.hasDeny() if hasActiveDeny { if isManual && manualEnabled { - return true + return true, nil } else { - return false + return false, nil } } if active.hasAllow() { - return true + return true, nil } - inactiveAllows := w.InactiveAllows() + inactiveAllows, err := w.InactiveAllows() + if err != nil { + return false, fmt.Errorf("invalid sync windows: %w", err) + } if inactiveAllows.HasWindows() { if isManual && inactiveAllows.manualEnabled() { - return true + return true, nil } else { - return false + return false, nil } } - return true + return true, nil } // hasDeny will iterate over the SyncWindows and return if a deny window is found and if @@ -2442,10 +2499,8 @@ func (w *SyncWindows) hasDeny() (bool, bool) { if a.Kind == "deny" { if !denyFound { manualEnabled = a.ManualSync - } else { - if manualEnabled { - manualEnabled = a.ManualSync - } + } else if manualEnabled { + manualEnabled = a.ManualSync } denyFound = true } @@ -2482,30 +2537,34 @@ func (w *SyncWindows) manualEnabled() bool { } // Active returns true if the sync window is currently active -func (w SyncWindow) Active() bool { +func (w SyncWindow) Active() (bool, error) { return w.active(time.Now()) } -func (w SyncWindow) active(currentTime time.Time) bool { - +func (w SyncWindow) active(currentTime time.Time) (bool, error) { // If SyncWindow.Active() is called outside of a UTC locale, it should be // first converted to UTC before search currentTime = currentTime.UTC() specParser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) - schedule, _ := specParser.Parse(w.Schedule) - duration, _ := time.ParseDuration(w.Duration) + schedule, sErr := specParser.Parse(w.Schedule) + if sErr != nil { + return false, fmt.Errorf("cannot parse schedule '%s': %w", w.Schedule, sErr) + } + duration, dErr := time.ParseDuration(w.Duration) + if dErr != nil { + return false, fmt.Errorf("cannot parse duration '%s': %w", w.Duration, dErr) + } // Offset the nextWindow time to consider the timeZone of the sync window timeZoneOffsetDuration := w.scheduleOffsetByTimeZone() nextWindow := schedule.Next(currentTime.Add(timeZoneOffsetDuration - duration)) - return nextWindow.Before(currentTime.Add(timeZoneOffsetDuration)) + return nextWindow.Before(currentTime.Add(timeZoneOffsetDuration)), nil } // Update updates a sync window's settings with the given parameter func (w *SyncWindow) Update(s string, d string, a []string, n []string, c []string, tz string) error { - if len(s) == 0 && len(d) == 0 && len(a) == 0 && len(n) == 0 && len(c) == 0 { return fmt.Errorf("cannot update: require one or more of schedule, duration, application, namespace, or cluster") } @@ -2536,7 +2595,6 @@ func (w *SyncWindow) Update(s string, d string, a []string, n []string, c []stri // Validate checks whether a sync window has valid configuration. The error returned indicates any problems that has been found. 
func (w *SyncWindow) Validate() error { - // Default timeZone to UTC if timeZone is not specified if w.TimeZone == "" { w.TimeZone = "UTC" @@ -2552,11 +2610,11 @@ func (w *SyncWindow) Validate() error { specParser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) _, err := specParser.Parse(w.Schedule) if err != nil { - return fmt.Errorf("cannot parse schedule '%s': %s", w.Schedule, err) + return fmt.Errorf("cannot parse schedule '%s': %w", w.Schedule, err) } _, err = time.ParseDuration(w.Duration) if err != nil { - return fmt.Errorf("cannot parse duration '%s': %s", w.Duration, err) + return fmt.Errorf("cannot parse duration '%s': %w", w.Duration, err) } return nil } @@ -2648,6 +2706,18 @@ func (app *Application) IsRefreshRequested() (RefreshType, bool) { return refreshType, true } +func (app *Application) HasPostDeleteFinalizer(stage ...string) bool { + return getFinalizerIndex(app.ObjectMeta, strings.Join(append([]string{PostDeleteFinalizerName}, stage...), "/")) > -1 +} + +func (app *Application) SetPostDeleteFinalizer(stage ...string) { + setFinalizer(&app.ObjectMeta, strings.Join(append([]string{PostDeleteFinalizerName}, stage...), "/"), true) +} + +func (app *Application) UnSetPostDeleteFinalizer(stage ...string) { + setFinalizer(&app.ObjectMeta, strings.Join(append([]string{PostDeleteFinalizerName}, stage...), "/"), false) +} + // SetCascadedDeletion will enable cascaded deletion by setting the propagation policy finalizer func (app *Application) SetCascadedDeletion(finalizer string) { setFinalizer(&app.ObjectMeta, finalizer, true) @@ -2922,6 +2992,12 @@ func SetK8SConfigDefaults(config *rest.Config) error { config.Timeout = K8sServerSideTimeout config.Transport = tr + maxRetries := env.ParseInt64FromEnv(utilhttp.EnvRetryMax, 0, 1, math.MaxInt64) + if maxRetries > 0 { + backoffDurationMS := env.ParseInt64FromEnv(utilhttp.EnvRetryBaseBackoff, 100, 1, math.MaxInt64) + backoffDuration := time.Duration(backoffDurationMS) * time.Millisecond + config.WrapTransport = utilhttp.WithRetry(maxRetries, backoffDuration) + } return nil } @@ -2964,6 +3040,9 @@ func (c *Cluster) RawRestConfig() *rest.Config { if c.Config.AWSAuthConfig.RoleARN != "" { args = append(args, "--role-arn", c.Config.AWSAuthConfig.RoleARN) } + if c.Config.AWSAuthConfig.Profile != "" { + args = append(args, "--profile", c.Config.AWSAuthConfig.Profile) + } config = &rest.Config{ Host: c.Server, TLSClientConfig: tlsClientConfig, @@ -3050,7 +3129,6 @@ func (r ResourceDiff) TargetObject() (*unstructured.Unstructured, error) { // SetInferredServer sets the Server field of the destination. See IsServerInferred() for details. 
func (d *ApplicationDestination) SetInferredServer(server string) { - d.isServerInferred = true d.Server = server } diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/values.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/values.go index 942e2a651c..1c0d6b76de 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/values.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/values.go @@ -19,11 +19,11 @@ func (h *ApplicationSourceHelm) SetValuesString(value string) error { } else { data, err := yaml.YAMLToJSON([]byte(value)) if err != nil { - return fmt.Errorf("failed converting yaml to json: %v", err) + return fmt.Errorf("failed converting yaml to json: %w", err) } var v interface{} if err := json.Unmarshal(data, &v); err != nil { - return fmt.Errorf("failed to unmarshal json: %v", err) + return fmt.Errorf("failed to unmarshal json: %w", err) } switch v.(type) { case string: diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go index e7245069b9..a6de15dd7a 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go @@ -376,6 +376,11 @@ func (in *ApplicationSetApplicationStatus) DeepCopyInto(out *ApplicationSetAppli in, out := &in.LastTransitionTime, &out.LastTransitionTime *out = (*in).DeepCopy() } + if in.TargetRevisions != nil { + in, out := &in.TargetRevisions, &out.TargetRevisions + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -733,6 +738,11 @@ func (in *ApplicationSetSpec) DeepCopyInto(out *ApplicationSetSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.TemplatePatch != nil { + in, out := &in.TemplatePatch, &out.TemplatePatch + *out = new(string) + **out = **in + } return } @@ -763,6 +773,13 @@ func (in *ApplicationSetStatus) DeepCopyInto(out *ApplicationSetStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -949,6 +966,29 @@ func (in ApplicationSetTerminalGenerators) DeepCopy() ApplicationSetTerminalGene return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationSetTree) DeepCopyInto(out *ApplicationSetTree) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]ResourceNode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSetTree. +func (in *ApplicationSetTree) DeepCopy() *ApplicationSetTree { + if in == nil { + return nil + } + out := new(ApplicationSetTree) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ApplicationSource) DeepCopyInto(out *ApplicationSource) { *out = *in @@ -1103,6 +1143,11 @@ func (in *ApplicationSourceKustomize) DeepCopyInto(out *ApplicationSourceKustomi (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -3695,6 +3740,7 @@ func (in *RevisionHistory) DeepCopyInto(out *RevisionHistory) { *out = make([]string, len(*in)) copy(*out, *in) } + out.InitiatedBy = in.InitiatedBy return } diff --git a/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/clientset.go b/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/clientset.go index 417dc758ef..23453a800a 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/clientset.go +++ b/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/clientset.go @@ -4,8 +4,12 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "math" "time" + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/util/env" + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" log "github.com/sirupsen/logrus" @@ -17,12 +21,10 @@ import ( "github.com/argoproj/argo-cd/v2/util/io" ) -//go:generate go run github.com/vektra/mockery/v2@v2.15.0 --name=RepoServerServiceClient +//go:generate go run github.com/vektra/mockery/v2@v2.40.2 --name=RepoServerServiceClient -const ( - // MaxGRPCMessageSize contains max grpc message size - MaxGRPCMessageSize = 100 * 1024 * 1024 -) +// MaxGRPCMessageSize contains max grpc message size +var MaxGRPCMessageSize = env.ParseNumFromEnv(common.EnvGRPCMaxSizeMB, 100, 0, math.MaxInt32) * 1024 * 1024 // TLSConfiguration describes parameters for TLS configuration to be used by a repo server API client type TLSConfiguration struct { diff --git a/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/repository.pb.go b/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/repository.pb.go index 914a967db3..eea2d9a8d3 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/repository.pb.go +++ b/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/repository.pb.go @@ -57,7 +57,9 @@ type ManifestRequest struct { // This is used to surface "source not permitted" errors for Helm repositories ProjectSourceRepos []string `protobuf:"bytes,24,rep,name=projectSourceRepos,proto3" json:"projectSourceRepos,omitempty"` // This is used to surface "source not permitted" errors for Helm repositories - ProjectName string `protobuf:"bytes,25,opt,name=projectName,proto3" json:"projectName,omitempty"` + ProjectName string `protobuf:"bytes,25,opt,name=projectName,proto3" json:"projectName,omitempty"` + // Holds instance installation id + InstallationID string `protobuf:"bytes,27,opt,name=installationID,proto3" json:"installationID,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -250,6 +252,13 @@ func (m *ManifestRequest) GetProjectName() string { return "" } +func (m *ManifestRequest) GetInstallationID() string { + if m != nil { + return m.InstallationID + } + return "" +} + type ManifestRequestWithFiles struct { // Types that are valid 
to be assigned to Part: // *ManifestRequestWithFiles_Request @@ -557,6 +566,7 @@ type ResolveRevisionRequest struct { Repo *v1alpha1.Repository `protobuf:"bytes,1,opt,name=repo,proto3" json:"repo,omitempty"` App *v1alpha1.Application `protobuf:"bytes,2,opt,name=app,proto3" json:"app,omitempty"` AmbiguousRevision string `protobuf:"bytes,3,opt,name=ambiguousRevision,proto3" json:"ambiguousRevision,omitempty"` + SourceIndex int64 `protobuf:"varint,4,opt,name=sourceIndex,proto3" json:"sourceIndex,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -616,6 +626,13 @@ func (m *ResolveRevisionRequest) GetAmbiguousRevision() string { return "" } +func (m *ResolveRevisionRequest) GetSourceIndex() int64 { + if m != nil { + return m.SourceIndex + } + return 0 +} + // ResolveRevisionResponse type ResolveRevisionResponse struct { // returns the resolved revision @@ -1911,6 +1928,7 @@ type GitFilesRequest struct { Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` NewGitFileGlobbingEnabled bool `protobuf:"varint,5,opt,name=NewGitFileGlobbingEnabled,proto3" json:"NewGitFileGlobbingEnabled,omitempty"` NoRevisionCache bool `protobuf:"varint,6,opt,name=noRevisionCache,proto3" json:"noRevisionCache,omitempty"` + VerifyCommit bool `protobuf:"varint,7,opt,name=verifyCommit,proto3" json:"verifyCommit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1991,6 +2009,13 @@ func (m *GitFilesRequest) GetNoRevisionCache() bool { return false } +func (m *GitFilesRequest) GetVerifyCommit() bool { + if m != nil { + return m.VerifyCommit + } + return false +} + type GitFilesResponse struct { // Map consisting of path of the path to its contents in bytes Map map[string][]byte `protobuf:"bytes,1,rep,name=map,proto3" json:"map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -2044,6 +2069,7 @@ type GitDirectoriesRequest struct { SubmoduleEnabled bool `protobuf:"varint,2,opt,name=submoduleEnabled,proto3" json:"submoduleEnabled,omitempty"` Revision string `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"` NoRevisionCache bool `protobuf:"varint,4,opt,name=noRevisionCache,proto3" json:"noRevisionCache,omitempty"` + VerifyCommit bool `protobuf:"varint,5,opt,name=verifyCommit,proto3" json:"verifyCommit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2110,6 +2136,13 @@ func (m *GitDirectoriesRequest) GetNoRevisionCache() bool { return false } +func (m *GitDirectoriesRequest) GetVerifyCommit() bool { + if m != nil { + return m.VerifyCommit + } + return false +} + type GitDirectoriesResponse struct { // A set of directory paths Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` @@ -2158,6 +2191,196 @@ func (m *GitDirectoriesResponse) GetPaths() []string { return nil } +type UpdateRevisionForPathsRequest struct { + Repo *v1alpha1.Repository `protobuf:"bytes,1,opt,name=repo,proto3" json:"repo,omitempty"` + AppLabelKey string `protobuf:"bytes,2,opt,name=appLabelKey,proto3" json:"appLabelKey,omitempty"` + AppName string `protobuf:"bytes,3,opt,name=appName,proto3" json:"appName,omitempty"` + Namespace string `protobuf:"bytes,4,opt,name=namespace,proto3" json:"namespace,omitempty"` + ApplicationSource *v1alpha1.ApplicationSource `protobuf:"bytes,5,opt,name=applicationSource,proto3" 
json:"applicationSource,omitempty"` + TrackingMethod string `protobuf:"bytes,6,opt,name=trackingMethod,proto3" json:"trackingMethod,omitempty"` + RefSources map[string]*v1alpha1.RefTarget `protobuf:"bytes,7,rep,name=refSources,proto3" json:"refSources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + KubeVersion string `protobuf:"bytes,8,opt,name=kubeVersion,proto3" json:"kubeVersion,omitempty"` + ApiVersions []string `protobuf:"bytes,9,rep,name=apiVersions,proto3" json:"apiVersions,omitempty"` + HasMultipleSources bool `protobuf:"varint,10,opt,name=hasMultipleSources,proto3" json:"hasMultipleSources,omitempty"` + SyncedRevision string `protobuf:"bytes,11,opt,name=syncedRevision,proto3" json:"syncedRevision,omitempty"` + Revision string `protobuf:"bytes,12,opt,name=revision,proto3" json:"revision,omitempty"` + Paths []string `protobuf:"bytes,13,rep,name=paths,proto3" json:"paths,omitempty"` + InstallationID string `protobuf:"bytes,15,opt,name=installationID,proto3" json:"installationID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateRevisionForPathsRequest) Reset() { *m = UpdateRevisionForPathsRequest{} } +func (m *UpdateRevisionForPathsRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateRevisionForPathsRequest) ProtoMessage() {} +func (*UpdateRevisionForPathsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dd8723cfcc820480, []int{31} +} +func (m *UpdateRevisionForPathsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateRevisionForPathsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateRevisionForPathsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateRevisionForPathsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateRevisionForPathsRequest.Merge(m, src) +} +func (m *UpdateRevisionForPathsRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateRevisionForPathsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateRevisionForPathsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateRevisionForPathsRequest proto.InternalMessageInfo + +func (m *UpdateRevisionForPathsRequest) GetRepo() *v1alpha1.Repository { + if m != nil { + return m.Repo + } + return nil +} + +func (m *UpdateRevisionForPathsRequest) GetAppLabelKey() string { + if m != nil { + return m.AppLabelKey + } + return "" +} + +func (m *UpdateRevisionForPathsRequest) GetAppName() string { + if m != nil { + return m.AppName + } + return "" +} + +func (m *UpdateRevisionForPathsRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *UpdateRevisionForPathsRequest) GetApplicationSource() *v1alpha1.ApplicationSource { + if m != nil { + return m.ApplicationSource + } + return nil +} + +func (m *UpdateRevisionForPathsRequest) GetTrackingMethod() string { + if m != nil { + return m.TrackingMethod + } + return "" +} + +func (m *UpdateRevisionForPathsRequest) GetRefSources() map[string]*v1alpha1.RefTarget { + if m != nil { + return m.RefSources + } + return nil +} + +func (m *UpdateRevisionForPathsRequest) GetKubeVersion() string { + if m != nil { + return m.KubeVersion + } + return "" +} + +func (m *UpdateRevisionForPathsRequest) GetApiVersions() []string 
{ + if m != nil { + return m.ApiVersions + } + return nil +} + +func (m *UpdateRevisionForPathsRequest) GetHasMultipleSources() bool { + if m != nil { + return m.HasMultipleSources + } + return false +} + +func (m *UpdateRevisionForPathsRequest) GetSyncedRevision() string { + if m != nil { + return m.SyncedRevision + } + return "" +} + +func (m *UpdateRevisionForPathsRequest) GetRevision() string { + if m != nil { + return m.Revision + } + return "" +} + +func (m *UpdateRevisionForPathsRequest) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func (m *UpdateRevisionForPathsRequest) GetInstallationID() string { + if m != nil { + return m.InstallationID + } + return "" +} + +type UpdateRevisionForPathsResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateRevisionForPathsResponse) Reset() { *m = UpdateRevisionForPathsResponse{} } +func (m *UpdateRevisionForPathsResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateRevisionForPathsResponse) ProtoMessage() {} +func (*UpdateRevisionForPathsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dd8723cfcc820480, []int{32} +} +func (m *UpdateRevisionForPathsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateRevisionForPathsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateRevisionForPathsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateRevisionForPathsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateRevisionForPathsResponse.Merge(m, src) +} +func (m *UpdateRevisionForPathsResponse) XXX_Size() int { + return m.Size() +} +func (m *UpdateRevisionForPathsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateRevisionForPathsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateRevisionForPathsResponse proto.InternalMessageInfo + func init() { proto.RegisterType((*ManifestRequest)(nil), "repository.ManifestRequest") proto.RegisterMapType((map[string]bool)(nil), "repository.ManifestRequest.EnabledSourceTypesEntry") @@ -2198,6 +2421,9 @@ func init() { proto.RegisterMapType((map[string][]byte)(nil), "repository.GitFilesResponse.MapEntry") proto.RegisterType((*GitDirectoriesRequest)(nil), "repository.GitDirectoriesRequest") proto.RegisterType((*GitDirectoriesResponse)(nil), "repository.GitDirectoriesResponse") + proto.RegisterType((*UpdateRevisionForPathsRequest)(nil), "repository.UpdateRevisionForPathsRequest") + proto.RegisterMapType((map[string]*v1alpha1.RefTarget)(nil), "repository.UpdateRevisionForPathsRequest.RefSourcesEntry") + proto.RegisterType((*UpdateRevisionForPathsResponse)(nil), "repository.UpdateRevisionForPathsResponse") } func init() { @@ -2205,140 +2431,152 @@ func init() { } var fileDescriptor_dd8723cfcc820480 = []byte{ - // 2127 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5a, 0x5b, 0x6f, 0x1b, 0xc7, - 0xf5, 0xe7, 0x92, 0x94, 0x44, 0x1e, 0xd9, 0x12, 0x35, 0xd6, 0x65, 0xc5, 0x38, 0x82, 0xb2, 0xff, - 0xbf, 0x0d, 0xd5, 0x4e, 0x48, 0x48, 0x46, 0xe2, 0xc2, 0x49, 0x53, 0x28, 0x8a, 0x2d, 0x39, 0xb6, - 0x6c, 0x75, 0xed, 0xb6, 0x48, 0xeb, 0xb6, 0x18, 0x2e, 0x87, 0xe4, 0x86, 0x7b, 0x19, 0xef, 0xce, - 0x2a, 0x90, 0x81, 0x3e, 0x14, 0x2d, 0xfa, 0x11, 0xfa, 0xd0, 
0xaf, 0x51, 0x14, 0x7d, 0xec, 0x53, - 0x2f, 0x8f, 0x41, 0xbf, 0x40, 0x0b, 0xbf, 0x14, 0xe8, 0xa7, 0x28, 0xe6, 0xb2, 0x57, 0xae, 0x64, - 0xa7, 0x94, 0x15, 0xb4, 0x2f, 0xf6, 0xce, 0x99, 0x33, 0xe7, 0x9c, 0x39, 0x73, 0x2e, 0xbf, 0x19, - 0x0a, 0xae, 0x07, 0x84, 0xfa, 0x21, 0x09, 0x8e, 0x49, 0xd0, 0x15, 0x9f, 0x36, 0xf3, 0x83, 0x93, - 0xcc, 0x67, 0x87, 0x06, 0x3e, 0xf3, 0x11, 0xa4, 0x94, 0xf6, 0xc3, 0xa1, 0xcd, 0x46, 0x51, 0xaf, - 0x63, 0xf9, 0x6e, 0x17, 0x07, 0x43, 0x9f, 0x06, 0xfe, 0x17, 0xe2, 0xe3, 0x3d, 0xab, 0xdf, 0x3d, - 0xde, 0xe9, 0xd2, 0xf1, 0xb0, 0x8b, 0xa9, 0x1d, 0x76, 0x31, 0xa5, 0x8e, 0x6d, 0x61, 0x66, 0xfb, - 0x5e, 0xf7, 0x78, 0x1b, 0x3b, 0x74, 0x84, 0xb7, 0xbb, 0x43, 0xe2, 0x91, 0x00, 0x33, 0xd2, 0x97, - 0x92, 0xdb, 0x6f, 0x0d, 0x7d, 0x7f, 0xe8, 0x90, 0xae, 0x18, 0xf5, 0xa2, 0x41, 0x97, 0xb8, 0x94, - 0x29, 0xb5, 0xc6, 0xbf, 0x2e, 0xc1, 0xe2, 0x21, 0xf6, 0xec, 0x01, 0x09, 0x99, 0x49, 0x9e, 0x47, - 0x24, 0x64, 0xe8, 0x19, 0xd4, 0xb9, 0x31, 0xba, 0xb6, 0xa9, 0x6d, 0xcd, 0xef, 0x1c, 0x74, 0x52, - 0x6b, 0x3a, 0xb1, 0x35, 0xe2, 0xe3, 0x67, 0x56, 0xbf, 0x73, 0xbc, 0xd3, 0xa1, 0xe3, 0x61, 0x87, - 0x5b, 0xd3, 0xc9, 0x58, 0xd3, 0x89, 0xad, 0xe9, 0x98, 0xc9, 0xb6, 0x4c, 0x21, 0x15, 0xb5, 0xa1, - 0x11, 0x90, 0x63, 0x3b, 0xb4, 0x7d, 0x4f, 0xaf, 0x6e, 0x6a, 0x5b, 0x4d, 0x33, 0x19, 0x23, 0x1d, - 0xe6, 0x3c, 0x7f, 0x0f, 0x5b, 0x23, 0xa2, 0xd7, 0x36, 0xb5, 0xad, 0x86, 0x19, 0x0f, 0xd1, 0x26, - 0xcc, 0x63, 0x4a, 0x1f, 0xe2, 0x1e, 0x71, 0x1e, 0x90, 0x13, 0xbd, 0x2e, 0x16, 0x66, 0x49, 0x7c, - 0x2d, 0xa6, 0xf4, 0x11, 0x76, 0x89, 0x3e, 0x23, 0x66, 0xe3, 0x21, 0xba, 0x0a, 0x4d, 0x0f, 0xbb, - 0x24, 0xa4, 0xd8, 0x22, 0x7a, 0x43, 0xcc, 0xa5, 0x04, 0xf4, 0x73, 0x58, 0xca, 0x18, 0xfe, 0xc4, - 0x8f, 0x02, 0x8b, 0xe8, 0x20, 0xb6, 0xfe, 0x78, 0xba, 0xad, 0xef, 0x16, 0xc5, 0x9a, 0x93, 0x9a, - 0xd0, 0x4f, 0x61, 0x46, 0x9c, 0xbc, 0x3e, 0xbf, 0x59, 0x3b, 0x57, 0x6f, 0x4b, 0xb1, 0xc8, 0x83, - 0x39, 0xea, 0x44, 0x43, 0xdb, 0x0b, 0xf5, 0x4b, 0x42, 0xc3, 0xd3, 0xe9, 0x34, 0xec, 0xf9, 0xde, - 0xc0, 0x1e, 0x1e, 0x62, 0x0f, 0x0f, 0x89, 0x4b, 0x3c, 0x76, 0x24, 0x84, 0x9b, 0xb1, 0x12, 0xf4, - 0x02, 0x5a, 0xe3, 0x28, 0x64, 0xbe, 0x6b, 0xbf, 0x20, 0x8f, 0x29, 0x5f, 0x1b, 0xea, 0x97, 0x85, - 0x37, 0x1f, 0x4d, 0xa7, 0xf8, 0x41, 0x41, 0xaa, 0x39, 0xa1, 0x87, 0x07, 0xc9, 0x38, 0xea, 0x91, - 0x1f, 0x90, 0x40, 0x44, 0xd7, 0x82, 0x0c, 0x92, 0x0c, 0x49, 0x86, 0x91, 0xad, 0x46, 0xa1, 0xbe, - 0xb8, 0x59, 0x93, 0x61, 0x94, 0x90, 0xd0, 0x16, 0x2c, 0x1e, 0x93, 0xc0, 0x1e, 0x9c, 0x3c, 0xb1, - 0x87, 0x1e, 0x66, 0x51, 0x40, 0xf4, 0x96, 0x08, 0xc5, 0x22, 0x19, 0xb9, 0x70, 0x79, 0x44, 0x1c, - 0x97, 0xbb, 0x7c, 0x2f, 0x20, 0xfd, 0x50, 0x5f, 0x12, 0xfe, 0xdd, 0x9f, 0xfe, 0x04, 0x85, 0x38, - 0x33, 0x2f, 0x9d, 0x1b, 0xe6, 0xf9, 0xa6, 0xca, 0x14, 0x99, 0x23, 0x48, 0x1a, 0x56, 0x20, 0xa3, - 0xeb, 0xb0, 0xc0, 0x02, 0x6c, 0x8d, 0x6d, 0x6f, 0x78, 0x48, 0xd8, 0xc8, 0xef, 0xeb, 0x57, 0x84, - 0x27, 0x0a, 0x54, 0x64, 0x01, 0x22, 0x1e, 0xee, 0x39, 0xa4, 0x2f, 0x63, 0xf1, 0xe9, 0x09, 0x25, - 0xa1, 0xbe, 0x2c, 0x76, 0x71, 0xab, 0x93, 0xa9, 0x50, 0x85, 0x02, 0xd1, 0xb9, 0x3b, 0xb1, 0xea, - 0xae, 0xc7, 0x82, 0x13, 0xb3, 0x44, 0x1c, 0x1a, 0xc3, 0x3c, 0xdf, 0x47, 0x1c, 0x0a, 0x2b, 0x22, - 0x14, 0xee, 0x4f, 0xe7, 0xa3, 0x83, 0x54, 0xa0, 0x99, 0x95, 0x8e, 0x3a, 0x80, 0x46, 0x38, 0x3c, - 0x8c, 0x1c, 0x66, 0x53, 0x87, 0x48, 0x33, 0x42, 0x7d, 0x55, 0xb8, 0xa9, 0x64, 0x06, 0x3d, 0x00, - 0x08, 0xc8, 0x20, 0xe6, 0x5b, 0x13, 0x3b, 0xbf, 0x79, 0xd6, 0xce, 0xcd, 0x84, 0x5b, 0xee, 0x38, - 0xb3, 0x9c, 0x2b, 0xe7, 0xdb, 0x20, 0x16, 0x53, 0xd9, 0x2e, 0xd2, 0x5a, 0x17, 0x21, 
-	// (remaining bytes of the old gzipped FileDescriptorProto elided: opaque machine-generated data)
+	// 2319 bytes of a gzipped FileDescriptorProto
+	// (regenerated descriptor bytes elided: opaque machine-generated data)
 }

 // Reference imports to suppress errors if they are not otherwise used.
@@ -2379,6 +2617,8 @@ type RepoServerServiceClient interface {
 	GetGitFiles(ctx context.Context, in *GitFilesRequest, opts ...grpc.CallOption) (*GitFilesResponse, error)
 	// GetGitDirectories returns a set of directory paths for the given repo
 	GetGitDirectories(ctx context.Context, in *GitDirectoriesRequest, opts ...grpc.CallOption) (*GitDirectoriesResponse, error)
+	// UpdateRevisionForPaths will compare two revisions and update the cache with the new revision if no changes are detected in the provided paths
+	UpdateRevisionForPaths(ctx context.Context, in *UpdateRevisionForPathsRequest, opts ...grpc.CallOption) (*UpdateRevisionForPathsResponse, error)
 }

 type repoServerServiceClient struct {
@@ -2531,6 +2771,15 @@ func (c *repoServerServiceClient) GetGitDirectories(ctx context.Context, in *Git
 	return out, nil
 }

+func (c *repoServerServiceClient) UpdateRevisionForPaths(ctx context.Context, in *UpdateRevisionForPathsRequest, opts ...grpc.CallOption) (*UpdateRevisionForPathsResponse, error) {
+	out := new(UpdateRevisionForPathsResponse)
+	err := c.cc.Invoke(ctx, "/repository.RepoServerService/UpdateRevisionForPaths", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
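A short usage sketch (annotation, not part of the patch): once this client wiring is in place, callers can invoke the new RPC like any other generated method. The import path, server address, and request values below are illustrative assumptions; real callers would typically also populate Repo and ApplicationSource from the application spec.

	package main

	import (
		"context"
		"log"

		"google.golang.org/grpc"
		"google.golang.org/grpc/credentials/insecure"

		// assumed package path for this generated file
		"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
	)

	func main() {
		conn, err := grpc.Dial("argocd-repo-server:8081",
			grpc.WithTransportCredentials(insecure.NewCredentials()))
		if err != nil {
			log.Fatalf("dial repo server: %v", err)
		}
		defer conn.Close()

		client := apiclient.NewRepoServerServiceClient(conn)
		// If nothing under Paths changed between SyncedRevision and Revision,
		// the server re-points its revision cache instead of regenerating manifests.
		resp, err := client.UpdateRevisionForPaths(context.Background(), &apiclient.UpdateRevisionForPathsRequest{
			AppName:        "my-app",
			SyncedRevision: "abc1234",
			Revision:       "HEAD",
			Paths:          []string{"manifests/"},
		})
		if err != nil {
			log.Fatalf("UpdateRevisionForPaths: %v", err)
		}
		log.Printf("response: %+v", resp)
	}

 // RepoServerServiceServer is the server API for RepoServerService service.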
type RepoServerServiceServer interface { // GenerateManifest generates manifest for application in specified repo name and revision @@ -2559,6 +2808,8 @@ type RepoServerServiceServer interface { GetGitFiles(context.Context, *GitFilesRequest) (*GitFilesResponse, error) // GetGitDirectories returns a set of directory paths for the given repo GetGitDirectories(context.Context, *GitDirectoriesRequest) (*GitDirectoriesResponse, error) + // UpdateRevisionForPaths will compare two revisions and update the cache with the new revision if no changes are detected in the provided paths + UpdateRevisionForPaths(context.Context, *UpdateRevisionForPathsRequest) (*UpdateRevisionForPathsResponse, error) } // UnimplementedRepoServerServiceServer can be embedded to have forward compatible implementations. @@ -2604,6 +2855,9 @@ func (*UnimplementedRepoServerServiceServer) GetGitFiles(ctx context.Context, re func (*UnimplementedRepoServerServiceServer) GetGitDirectories(ctx context.Context, req *GitDirectoriesRequest) (*GitDirectoriesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetGitDirectories not implemented") } +func (*UnimplementedRepoServerServiceServer) UpdateRevisionForPaths(ctx context.Context, req *UpdateRevisionForPathsRequest) (*UpdateRevisionForPathsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateRevisionForPaths not implemented") +} func RegisterRepoServerServiceServer(s *grpc.Server, srv RepoServerServiceServer) { s.RegisterService(&_RepoServerService_serviceDesc, srv) @@ -2851,6 +3105,24 @@ func _RepoServerService_GetGitDirectories_Handler(srv interface{}, ctx context.C return interceptor(ctx, in, info, handler) } +func _RepoServerService_UpdateRevisionForPaths_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateRevisionForPathsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RepoServerServiceServer).UpdateRevisionForPaths(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/repository.RepoServerService/UpdateRevisionForPaths", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RepoServerServiceServer).UpdateRevisionForPaths(ctx, req.(*UpdateRevisionForPathsRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _RepoServerService_serviceDesc = grpc.ServiceDesc{ ServiceName: "repository.RepoServerService", HandlerType: (*RepoServerServiceServer)(nil), @@ -2903,6 +3175,10 @@ var _RepoServerService_serviceDesc = grpc.ServiceDesc{ MethodName: "GetGitDirectories", Handler: _RepoServerService_GetGitDirectories_Handler, }, + { + MethodName: "UpdateRevisionForPaths", + Handler: _RepoServerService_UpdateRevisionForPaths_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -2938,6 +3214,15 @@ func (m *ManifestRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.InstallationID) > 0 { + i -= len(m.InstallationID) + copy(dAtA[i:], m.InstallationID) + i = encodeVarintRepository(dAtA, i, uint64(len(m.InstallationID))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } if len(m.ProjectName) > 0 { i -= len(m.ProjectName) copy(dAtA[i:], m.ProjectName) @@ -3478,6 +3763,11 @@ func (m *ResolveRevisionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.SourceIndex 
!= 0 { + i = encodeVarintRepository(dAtA, i, uint64(m.SourceIndex)) + i-- + dAtA[i] = 0x20 + } if len(m.AmbiguousRevision) > 0 { i -= len(m.AmbiguousRevision) copy(dAtA[i:], m.AmbiguousRevision) @@ -4695,6 +4985,16 @@ func (m *GitFilesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.VerifyCommit { + i-- + if m.VerifyCommit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } if m.NoRevisionCache { i-- if m.NoRevisionCache { @@ -4826,6 +5126,16 @@ func (m *GitDirectoriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.VerifyCommit { + i-- + if m.VerifyCommit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } if m.NoRevisionCache { i-- if m.NoRevisionCache { @@ -4904,45 +5214,233 @@ func (m *GitDirectoriesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func encodeVarintRepository(dAtA []byte, offset int, v uint64) int { - offset -= sovRepository(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *UpdateRevisionForPathsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *ManifestRequest) Size() (n int) { - if m == nil { - return 0 - } + +func (m *UpdateRevisionForPathsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateRevisionForPathsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Repo != nil { - l = m.Repo.Size() - n += 1 + l + sovRepository(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - l = len(m.Revision) - if l > 0 { - n += 1 + l + sovRepository(uint64(l)) + if len(m.InstallationID) > 0 { + i -= len(m.InstallationID) + copy(dAtA[i:], m.InstallationID) + i = encodeVarintRepository(dAtA, i, uint64(len(m.InstallationID))) + i-- + dAtA[i] = 0x7a } - if m.NoCache { - n += 2 + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintRepository(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- + dAtA[i] = 0x6a + } } - l = len(m.AppLabelKey) - if l > 0 { - n += 1 + l + sovRepository(uint64(l)) + if len(m.Revision) > 0 { + i -= len(m.Revision) + copy(dAtA[i:], m.Revision) + i = encodeVarintRepository(dAtA, i, uint64(len(m.Revision))) + i-- + dAtA[i] = 0x62 } - l = len(m.AppName) - if l > 0 { - n += 1 + l + sovRepository(uint64(l)) + if len(m.SyncedRevision) > 0 { + i -= len(m.SyncedRevision) + copy(dAtA[i:], m.SyncedRevision) + i = encodeVarintRepository(dAtA, i, uint64(len(m.SyncedRevision))) + i-- + dAtA[i] = 0x5a } - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovRepository(uint64(l)) + if m.HasMultipleSources { + i-- + if m.HasMultipleSources { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if len(m.ApiVersions) > 0 { + for iNdEx := len(m.ApiVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ApiVersions[iNdEx]) + copy(dAtA[i:], m.ApiVersions[iNdEx]) + i = encodeVarintRepository(dAtA, i, uint64(len(m.ApiVersions[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if len(m.KubeVersion) > 0 { + 
i -= len(m.KubeVersion) + copy(dAtA[i:], m.KubeVersion) + i = encodeVarintRepository(dAtA, i, uint64(len(m.KubeVersion))) + i-- + dAtA[i] = 0x42 + } + if len(m.RefSources) > 0 { + for k := range m.RefSources { + v := m.RefSources[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRepository(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintRepository(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintRepository(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.TrackingMethod) > 0 { + i -= len(m.TrackingMethod) + copy(dAtA[i:], m.TrackingMethod) + i = encodeVarintRepository(dAtA, i, uint64(len(m.TrackingMethod))) + i-- + dAtA[i] = 0x32 + } + if m.ApplicationSource != nil { + { + size, err := m.ApplicationSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRepository(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintRepository(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x22 + } + if len(m.AppName) > 0 { + i -= len(m.AppName) + copy(dAtA[i:], m.AppName) + i = encodeVarintRepository(dAtA, i, uint64(len(m.AppName))) + i-- + dAtA[i] = 0x1a + } + if len(m.AppLabelKey) > 0 { + i -= len(m.AppLabelKey) + copy(dAtA[i:], m.AppLabelKey) + i = encodeVarintRepository(dAtA, i, uint64(len(m.AppLabelKey))) + i-- + dAtA[i] = 0x12 + } + if m.Repo != nil { + { + size, err := m.Repo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRepository(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateRevisionForPathsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateRevisionForPathsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateRevisionForPathsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func encodeVarintRepository(dAtA []byte, offset int, v uint64) int { + offset -= sovRepository(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ManifestRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Repo != nil { + l = m.Repo.Size() + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.Revision) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + if m.NoCache { + n += 2 + } + l = len(m.AppLabelKey) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.AppName) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) } if m.ApplicationSource != nil { l = m.ApplicationSource.Size() @@ -5028,6 +5526,10 @@ func (m *ManifestRequest) Size() (n int) { if l > 0 { n += 2 + l + sovRepository(uint64(l)) } + l = len(m.InstallationID) + if l > 0 { + n += 2 + l + sovRepository(uint64(l)) + } if m.XXX_unrecognized 
!= nil { n += len(m.XXX_unrecognized) } @@ -5169,6 +5671,9 @@ func (m *ResolveRevisionRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRepository(uint64(l)) } + if m.SourceIndex != 0 { + n += 1 + sovRepository(uint64(m.SourceIndex)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5725,6 +6230,9 @@ func (m *GitFilesRequest) Size() (n int) { if m.NoRevisionCache { n += 2 } + if m.VerifyCommit { + n += 2 + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5775,6 +6283,9 @@ func (m *GitDirectoriesRequest) Size() (n int) { if m.NoRevisionCache { n += 2 } + if m.VerifyCommit { + n += 2 + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5799,41 +6310,133 @@ func (m *GitDirectoriesResponse) Size() (n int) { return n } -func sovRepository(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozRepository(x uint64) (n int) { - return sovRepository(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ManifestRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRepository - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break +func (m *UpdateRevisionForPathsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Repo != nil { + l = m.Repo.Size() + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.AppLabelKey) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.AppName) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + if m.ApplicationSource != nil { + l = m.ApplicationSource.Size() + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.TrackingMethod) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + if len(m.RefSources) > 0 { + for k, v := range m.RefSources { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovRepository(uint64(l)) } + mapEntrySize := 1 + len(k) + sovRepository(uint64(len(k))) + l + n += mapEntrySize + 1 + sovRepository(uint64(mapEntrySize)) } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ManifestRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ManifestRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + l = len(m.KubeVersion) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + if len(m.ApiVersions) > 0 { + for _, s := range m.ApiVersions { + l = len(s) + n += 1 + l + sovRepository(uint64(l)) } - switch fieldNum { + } + if m.HasMultipleSources { + n += 2 + } + l = len(m.SyncedRevision) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.Revision) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovRepository(uint64(l)) + } + } + l = len(m.InstallationID) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UpdateRevisionForPathsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRepository(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func 
sozRepository(x uint64) (n int) { + return sovRepository(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ManifestRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManifestRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManifestRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Repo", wireType) @@ -6692,6 +7295,38 @@ func (m *ManifestRequest) Unmarshal(dAtA []byte) error { } m.ProjectName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstallationID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InstallationID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRepository(dAtA[iNdEx:]) @@ -7348,6 +7983,25 @@ func (m *ResolveRevisionRequest) Unmarshal(dAtA []byte) error { } m.AmbiguousRevision = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceIndex", wireType) + } + m.SourceIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SourceIndex |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRepository(dAtA[iNdEx:]) @@ -10936,6 +11590,26 @@ func (m *GitFilesRequest) Unmarshal(dAtA []byte) error { } } m.NoRevisionCache = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VerifyCommit", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.VerifyCommit = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRepository(dAtA[iNdEx:]) @@ -11274,6 +11948,26 @@ func (m *GitDirectoriesRequest) Unmarshal(dAtA []byte) error { } } m.NoRevisionCache = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VerifyCommit", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.VerifyCommit = bool(v != 0) 
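An aside on the decode loops in these generated Unmarshal methods: every field starts with a varint key whose low three bits are the wire type and whose upper bits are the field number, and bool fields such as VerifyCommit arrive as a plain varint that is then compared against zero. A minimal standalone sketch of the key split (illustrative, not part of the patch):

	package main

	import "fmt"

	// decodeKey mirrors the generated loop: accumulate 7 bits per byte until the
	// continuation bit (0x80) is clear, then split the key into field number and
	// wire type.
	func decodeKey(data []byte) (fieldNum int32, wireType int, n int, err error) {
		var key uint64
		for shift := uint(0); ; shift += 7 {
			if n >= len(data) {
				return 0, 0, 0, fmt.Errorf("unexpected EOF")
			}
			b := data[n]
			n++
			key |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		return int32(key >> 3), int(key & 0x7), n, nil
	}

	func main() {
		// 0x28 is exactly the key GitDirectoriesRequest.VerifyCommit uses above:
		// field 5, wire type 0 (varint); the 0x01 payload decodes to true.
		f, w, n, _ := decodeKey([]byte{0x28, 0x01})
		fmt.Println(f, w, n) // 5 0 1
	}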
default: iNdEx = preIndex skippy, err := skipRepository(dAtA[iNdEx:]) @@ -11379,6 +12073,649 @@ func (m *GitDirectoriesResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *UpdateRevisionForPathsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateRevisionForPathsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateRevisionForPathsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Repo == nil { + m.Repo = &v1alpha1.Repository{} + } + if err := m.Repo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppLabelKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppLabelKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplicationSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ApplicationSource == nil { + m.ApplicationSource = &v1alpha1.ApplicationSource{} + } + if err := m.ApplicationSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrackingMethod", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrackingMethod = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefSources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RefSources == nil { + m.RefSources = make(map[string]*v1alpha1.RefTarget) + } + var mapkey string + var mapvalue *v1alpha1.RefTarget + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthRepository + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthRepository + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthRepository + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthRepository + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &v1alpha1.RefTarget{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipRepository(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRepository + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RefSources[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApiVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ApiVersions = append(m.ApiVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasMultipleSources", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasMultipleSources = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SyncedRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SyncedRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstallationID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InstallationID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRepository(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRepository + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateRevisionForPathsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateRevisionForPathsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateRevisionForPathsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRepository(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRepository + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
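Two details worth noting in UpdateRevisionForPathsRequest's decoder: the cases jump from field 13 (Paths) to field 15 (InstallationID), so field number 14 is unused here (presumably reserved upstream), and any field the decoder does not recognize is skipped and preserved verbatim in XXX_unrecognized rather than rejected. The key bytes the matching marshal code writes can be reproduced with a small sketch (illustrative, not part of the patch):

	package main

	import "fmt"

	// encodeKey varint-encodes (fieldNum<<3 | wireType), the same bytes
	// MarshalToSizedBuffer emits for each field key (it writes them in reverse
	// order because it fills the buffer back to front).
	func encodeKey(fieldNum, wireType uint64) []byte {
		v := fieldNum<<3 | wireType
		var out []byte
		for v >= 1<<7 {
			out = append(out, byte(v&0x7f|0x80))
			v >>= 7
		}
		return append(out, byte(v))
	}

	func main() {
		fmt.Printf("% x\n", encodeKey(15, 2)) // 7a: InstallationID in UpdateRevisionForPathsRequest
		fmt.Printf("% x\n", encodeKey(27, 2)) // da 01: InstallationID in ManifestRequest (written above as 0xda then 0x1)
	}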
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipRepository(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/cache/cache.go b/vendor/github.com/argoproj/argo-cd/v2/util/cache/cache.go index fdea46cdea..fb1d20c5b7 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/cache/cache.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/cache/cache.go @@ -2,15 +2,16 @@ package cache import ( "context" + "crypto/tls" + "crypto/x509" "fmt" "math" "os" + "strings" "time" - "crypto/tls" - "crypto/x509" - "github.com/redis/go-redis/v9" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/argoproj/argo-cd/v2/common" @@ -27,6 +28,15 @@ const ( envRedisRetryCount = "REDIS_RETRY_COUNT" // defaultRedisRetryCount holds default number of retries defaultRedisRetryCount = 3 + // envRedisSentinelPassword is an env variable name which stores redis sentinel password + envRedisSentinelPassword = "REDIS_SENTINEL_PASSWORD" + // envRedisSentinelUsername is an env variable name which stores redis sentinel username + envRedisSentinelUsername = "REDIS_SENTINEL_USERNAME" +) + +const ( + // CLIFlagRedisCompress is a cli flag name to define the redis compression setting for data sent to redis + CLIFlagRedisCompress = "redis-compress" ) func NewCache(client CacheClient) *Cache { @@ -52,28 +62,74 @@ func buildRedisClient(redisAddress, password, username string, redisDB, maxRetri return client } -func buildFailoverRedisClient(sentinelMaster, password, username string, redisDB, maxRetries int, tlsConfig *tls.Config, sentinelAddresses []string) *redis.Client { +func buildFailoverRedisClient(sentinelMaster, sentinelUsername, sentinelPassword, password, username string, redisDB, maxRetries int, tlsConfig *tls.Config, sentinelAddresses []string) *redis.Client { opts := &redis.FailoverOptions{ - MasterName: sentinelMaster, - SentinelAddrs: sentinelAddresses, - DB: redisDB, - Password: password, - MaxRetries: maxRetries, - TLSConfig: tlsConfig, - Username: username, + MasterName: sentinelMaster, + SentinelAddrs: sentinelAddresses, + DB: redisDB, + Password: password, + MaxRetries: maxRetries, + TLSConfig: tlsConfig, + Username: username, + SentinelUsername: sentinelUsername, + SentinelPassword: sentinelPassword, } client := redis.NewFailoverClient(opts) client.AddHook(redis.Hook(NewArgoRedisHook(func() { - *client = *buildFailoverRedisClient(sentinelMaster, password, username, redisDB, maxRetries, tlsConfig, sentinelAddresses) + *client = *buildFailoverRedisClient(sentinelMaster, sentinelUsername, sentinelPassword, password, username, redisDB, maxRetries, tlsConfig, sentinelAddresses) }))) return client } +type Options struct { + FlagPrefix string + OnClientCreated func(client *redis.Client) +} + +func (o *Options) callOnClientCreated(client *redis.Client) { + if o.OnClientCreated != nil { + o.OnClientCreated(client) + } +} + +func (o *Options) getEnvPrefix() string { + return strings.ReplaceAll(strings.ToUpper(o.FlagPrefix), "-", "_") +} + +func mergeOptions(opts ...Options) Options { + var result Options + for _, o := range opts { + if o.FlagPrefix != "" { + result.FlagPrefix = o.FlagPrefix + } + if o.OnClientCreated != nil { + result.OnClientCreated = o.OnClientCreated + } + } + return result +} + +func getFlagVal[T any](cmd *cobra.Command, o Options, name 
string, getVal func(name string) (T, error)) func() T { + return func() T { + var res T + var err error + if o.FlagPrefix != "" && cmd.Flags().Changed(o.FlagPrefix+name) { + res, err = getVal(o.FlagPrefix + name) + } else { + res, err = getVal(name) + } + if err != nil { + panic(err) + } + return res + } +} + // AddCacheFlagsToCmd adds flags which control caching to the specified command -func AddCacheFlagsToCmd(cmd *cobra.Command, opts ...func(client *redis.Client)) func() (*Cache, error) { +func AddCacheFlagsToCmd(cmd *cobra.Command, opts ...Options) func() (*Cache, error) { redisAddress := "" sentinelAddresses := make([]string, 0) sentinelMaster := "" @@ -84,20 +140,44 @@ func AddCacheFlagsToCmd(cmd *cobra.Command, opts ...func(client *redis.Client)) redisUseTLS := false insecureRedis := false compressionStr := "" + opt := mergeOptions(opts...) var defaultCacheExpiration time.Duration - cmd.Flags().StringVar(&redisAddress, "redis", env.StringFromEnv("REDIS_SERVER", ""), "Redis server hostname and port (e.g. argocd-redis:6379). ") - cmd.Flags().IntVar(&redisDB, "redisdb", env.ParseNumFromEnv("REDISDB", 0, 0, math.MaxInt32), "Redis database.") - cmd.Flags().StringArrayVar(&sentinelAddresses, "sentinel", []string{}, "Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379). ") - cmd.Flags().StringVar(&sentinelMaster, "sentinelmaster", "master", "Redis sentinel master group name.") - cmd.Flags().DurationVar(&defaultCacheExpiration, "default-cache-expiration", env.ParseDurationFromEnv("ARGOCD_DEFAULT_CACHE_EXPIRATION", 24*time.Hour, 0, math.MaxInt64), "Cache expiration default") - cmd.Flags().BoolVar(&redisUseTLS, "redis-use-tls", false, "Use TLS when connecting to Redis. ") - cmd.Flags().StringVar(&redisClientCertificate, "redis-client-certificate", "", "Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).") - cmd.Flags().StringVar(&redisClientKey, "redis-client-key", "", "Path to Redis client key (e.g. /etc/certs/redis/client.crt).") - cmd.Flags().BoolVar(&insecureRedis, "redis-insecure-skip-tls-verify", false, "Skip Redis server certificate validation.") - cmd.Flags().StringVar(&redisCACertificate, "redis-ca-certificate", "", "Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.") - cmd.Flags().StringVar(&compressionStr, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(RedisCompressionGZip)), "Enable compression for data sent to Redis with the required compression algorithm. (possible values: gzip, none)") + cmd.Flags().StringVar(&redisAddress, opt.FlagPrefix+"redis", env.StringFromEnv(opt.getEnvPrefix()+"REDIS_SERVER", ""), "Redis server hostname and port (e.g. argocd-redis:6379). ") + redisAddressSrc := getFlagVal(cmd, opt, "redis", cmd.Flags().GetString) + cmd.Flags().IntVar(&redisDB, opt.FlagPrefix+"redisdb", env.ParseNumFromEnv(opt.getEnvPrefix()+"REDISDB", 0, 0, math.MaxInt32), "Redis database.") + redisDBSrc := getFlagVal(cmd, opt, "redisdb", cmd.Flags().GetInt) + cmd.Flags().StringArrayVar(&sentinelAddresses, opt.FlagPrefix+"sentinel", []string{}, "Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379). 
") + sentinelAddressesSrc := getFlagVal(cmd, opt, "sentinel", cmd.Flags().GetStringArray) + cmd.Flags().StringVar(&sentinelMaster, opt.FlagPrefix+"sentinelmaster", "master", "Redis sentinel master group name.") + sentinelMasterSrc := getFlagVal(cmd, opt, "sentinelmaster", cmd.Flags().GetString) + cmd.Flags().DurationVar(&defaultCacheExpiration, opt.FlagPrefix+"default-cache-expiration", env.ParseDurationFromEnv("ARGOCD_DEFAULT_CACHE_EXPIRATION", 24*time.Hour, 0, math.MaxInt64), "Cache expiration default") + defaultCacheExpirationSrc := getFlagVal(cmd, opt, "default-cache-expiration", cmd.Flags().GetDuration) + cmd.Flags().BoolVar(&redisUseTLS, opt.FlagPrefix+"redis-use-tls", false, "Use TLS when connecting to Redis. ") + redisUseTLSSrc := getFlagVal(cmd, opt, "redis-use-tls", cmd.Flags().GetBool) + cmd.Flags().StringVar(&redisClientCertificate, opt.FlagPrefix+"redis-client-certificate", "", "Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).") + redisClientCertificateSrc := getFlagVal(cmd, opt, "redis-client-certificate", cmd.Flags().GetString) + cmd.Flags().StringVar(&redisClientKey, opt.FlagPrefix+"redis-client-key", "", "Path to Redis client key (e.g. /etc/certs/redis/client.crt).") + redisClientKeySrc := getFlagVal(cmd, opt, "redis-client-key", cmd.Flags().GetString) + cmd.Flags().BoolVar(&insecureRedis, opt.FlagPrefix+"redis-insecure-skip-tls-verify", false, "Skip Redis server certificate validation.") + insecureRedisSrc := getFlagVal(cmd, opt, "redis-insecure-skip-tls-verify", cmd.Flags().GetBool) + cmd.Flags().StringVar(&redisCACertificate, opt.FlagPrefix+"redis-ca-certificate", "", "Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.") + redisCACertificateSrc := getFlagVal(cmd, opt, "redis-ca-certificate", cmd.Flags().GetString) + cmd.Flags().StringVar(&compressionStr, opt.FlagPrefix+CLIFlagRedisCompress, env.StringFromEnv(opt.getEnvPrefix()+"REDIS_COMPRESSION", string(RedisCompressionGZip)), "Enable compression for data sent to Redis with the required compression algorithm. 
(possible values: gzip, none)") + compressionStrSrc := getFlagVal(cmd, opt, CLIFlagRedisCompress, cmd.Flags().GetString) return func() (*Cache, error) { + redisAddress := redisAddressSrc() + redisDB := redisDBSrc() + sentinelAddresses := sentinelAddressesSrc() + sentinelMaster := sentinelMasterSrc() + defaultCacheExpiration := defaultCacheExpirationSrc() + redisUseTLS := redisUseTLSSrc() + redisClientCertificate := redisClientCertificateSrc() + redisClientKey := redisClientKeySrc() + insecureRedis := insecureRedisSrc() + redisCACertificate := redisCACertificateSrc() + compressionStr := compressionStrSrc() + var tlsConfig *tls.Config = nil if redisUseTLS { tlsConfig = &tls.Config{} @@ -126,16 +206,31 @@ func AddCacheFlagsToCmd(cmd *cobra.Command, opts ...func(client *redis.Client)) } password := os.Getenv(envRedisPassword) username := os.Getenv(envRedisUsername) + sentinelUsername := os.Getenv(envRedisSentinelUsername) + sentinelPassword := os.Getenv(envRedisSentinelPassword) + if opt.FlagPrefix != "" { + if val := os.Getenv(opt.getEnvPrefix() + envRedisUsername); val != "" { + username = val + } + if val := os.Getenv(opt.getEnvPrefix() + envRedisPassword); val != "" { + password = val + } + if val := os.Getenv(opt.getEnvPrefix() + envRedisSentinelUsername); val != "" { + sentinelUsername = val + } + if val := os.Getenv(opt.getEnvPrefix() + envRedisSentinelPassword); val != "" { + sentinelPassword = val + } + } + maxRetries := env.ParseNumFromEnv(envRedisRetryCount, defaultRedisRetryCount, 0, math.MaxInt32) compression, err := CompressionTypeFromString(compressionStr) if err != nil { return nil, err } if len(sentinelAddresses) > 0 { - client := buildFailoverRedisClient(sentinelMaster, password, username, redisDB, maxRetries, tlsConfig, sentinelAddresses) - for i := range opts { - opts[i](client) - } + client := buildFailoverRedisClient(sentinelMaster, sentinelUsername, sentinelPassword, password, username, redisDB, maxRetries, tlsConfig, sentinelAddresses) + opt.callOnClientCreated(client) return NewCache(NewRedisCache(client, defaultCacheExpiration, compression)), nil } if redisAddress == "" { @@ -143,9 +238,7 @@ func AddCacheFlagsToCmd(cmd *cobra.Command, opts ...func(client *redis.Client)) } client := buildRedisClient(redisAddress, password, username, redisDB, maxRetries, tlsConfig) - for i := range opts { - opts[i](client) - } + opt.callOnClientCreated(client) return NewCache(NewRedisCache(client, defaultCacheExpiration, compression)), nil } } @@ -163,30 +256,47 @@ func (c *Cache) SetClient(client CacheClient) { c.client = client } -func (c *Cache) SetItem(key string, item interface{}, expiration time.Duration, delete bool) error { - key = fmt.Sprintf("%s|%s", key, common.CacheVersion) - if delete { - return c.client.Delete(key) +func (c *Cache) RenameItem(oldKey string, newKey string, expiration time.Duration) error { + return c.client.Rename(fmt.Sprintf("%s|%s", oldKey, common.CacheVersion), fmt.Sprintf("%s|%s", newKey, common.CacheVersion), expiration) +} + +func (c *Cache) generateFullKey(key string) string { + if key == "" { + log.Debug("Cache key is empty, this will result in key collisions if there is more than one empty key") + } + return fmt.Sprintf("%s|%s", key, common.CacheVersion) +} + +// Sets or deletes an item in cache +func (c *Cache) SetItem(key string, item interface{}, opts *CacheActionOpts) error { + if item == nil { + return fmt.Errorf("cannot set nil item in cache") + } + if opts == nil { + opts = &CacheActionOpts{} + } + fullKey := c.generateFullKey(key) + client 
:= c.GetClient() + if opts.Delete { + return client.Delete(fullKey) } else { - if item == nil { - return fmt.Errorf("cannot set item to nil for key %s", key) - } - return c.client.Set(&Item{Object: item, Key: key, Expiration: expiration}) + return client.Set(&Item{Key: fullKey, Object: item, CacheActionOpts: *opts}) } } func (c *Cache) GetItem(key string, item interface{}) error { + key = c.generateFullKey(key) if item == nil { return fmt.Errorf("cannot get item into a nil for key %s", key) } - key = fmt.Sprintf("%s|%s", key, common.CacheVersion) - return c.client.Get(key, item) + client := c.GetClient() + return client.Get(key, item) } func (c *Cache) OnUpdated(ctx context.Context, key string, callback func() error) error { - return c.client.OnUpdated(ctx, fmt.Sprintf("%s|%s", key, common.CacheVersion), callback) + return c.client.OnUpdated(ctx, c.generateFullKey(key), callback) } func (c *Cache) NotifyUpdated(key string) error { - return c.client.NotifyUpdated(fmt.Sprintf("%s|%s", key, common.CacheVersion)) + return c.client.NotifyUpdated(c.generateFullKey(key)) } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/cache/client.go b/vendor/github.com/argoproj/argo-cd/v2/util/cache/client.go index 434c2a8da1..f5bb7b9427 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/cache/client.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/cache/client.go @@ -6,17 +6,30 @@ import ( "time" ) -var ErrCacheMiss = errors.New("cache: key is missing") +var ( + ErrCacheMiss = errors.New("cache: key is missing") + ErrCacheKeyLocked = errors.New("cache: key is locked") + CacheLockedValue = "locked" +) type Item struct { - Key string - Object interface{} + Key string + Object interface{} + CacheActionOpts CacheActionOpts +} + +type CacheActionOpts struct { + // Delete item from cache + Delete bool + // Disable writing if key already exists (NX) + DisableOverwrite bool // Expiration is the cache expiration time. 
Expiration time.Duration } type CacheClient interface { Set(item *Item) error + Rename(oldKey string, newKey string, expiration time.Duration) error Get(key string, obj interface{}) error Delete(key string) error OnUpdated(ctx context.Context, key string, callback func() error) error diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/cache/inmemory.go b/vendor/github.com/argoproj/argo-cd/v2/util/cache/inmemory.go index 53e690925d..c46b02b942 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/cache/inmemory.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/cache/inmemory.go @@ -16,7 +16,11 @@ func NewInMemoryCache(expiration time.Duration) *InMemoryCache { } } -// compile-time validation of adherance of the CacheClient contract +func init() { + gob.Register([]interface{}{}) +} + +// compile-time validation of adherence of the CacheClient contract var _ CacheClient = &InMemoryCache{} type InMemoryCache struct { @@ -29,7 +33,22 @@ func (i *InMemoryCache) Set(item *Item) error { if err != nil { return err } - i.memCache.Set(item.Key, buf, item.Expiration) + if item.CacheActionOpts.DisableOverwrite { + // go-redis doesn't throw an error on Set with NX, so absorbing here to keep the interface consistent + _ = i.memCache.Add(item.Key, buf, item.CacheActionOpts.Expiration) + } else { + i.memCache.Set(item.Key, buf, item.CacheActionOpts.Expiration) + } + return nil +} + +func (i *InMemoryCache) Rename(oldKey string, newKey string, expiration time.Duration) error { + bufIf, found := i.memCache.Get(oldKey) + if !found { + return ErrCacheMiss + } + i.memCache.Set(newKey, bufIf, expiration) + i.memCache.Delete(oldKey) return nil } @@ -81,11 +100,9 @@ func (i *InMemoryCache) NotifyUpdated(key string) error { // Items return a list of items in the cache; requires passing a constructor function // so that the items can be decoded from gob format. 
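// Editor's note (illustrative, not part of the vendored change): the constructor
// tells the cache which concrete type to decode each gob payload into. With a
// hypothetical entry type MyEntry:
//
//	entries, err := memCache.Items(func() interface{} { return &MyEntry{} })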
func (i *InMemoryCache) Items(createNewObject func() interface{}) (map[string]interface{}, error) { - result := map[string]interface{}{} for key, value := range i.memCache.Items() { - buf := value.Object.(bytes.Buffer) obj := createNewObject() err := gob.NewDecoder(&buf).Decode(obj) @@ -94,7 +111,6 @@ func (i *InMemoryCache) Items(createNewObject func() interface{}) (map[string]in } result[key] = obj - } return result, nil diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis.go b/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis.go index f483d2cbec..c8d1307f2b 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis.go @@ -5,6 +5,7 @@ import ( "compress/gzip" "context" "encoding/json" + "errors" "fmt" "io" "net" @@ -42,7 +43,7 @@ func NewRedisCache(client *redis.Client, expiration time.Duration, compressionTy } } -// compile-time validation of adherance of the CacheClient contract +// compile-time validation of adherence of the CacheClient contract var _ CacheClient = &redisCache{} type redisCache struct { @@ -96,8 +97,17 @@ func (r *redisCache) unmarshal(data []byte, obj interface{}) error { return nil } +func (r *redisCache) Rename(oldKey string, newKey string, _ time.Duration) error { + err := r.client.Rename(context.TODO(), r.getKey(oldKey), r.getKey(newKey)).Err() + if err != nil && err.Error() == "ERR no such key" { + err = ErrCacheMiss + } + + return err +} + func (r *redisCache) Set(item *Item) error { - expiration := item.Expiration + expiration := item.CacheActionOpts.Expiration if expiration == 0 { expiration = r.expiration } @@ -111,13 +121,14 @@ func (r *redisCache) Set(item *Item) error { Key: r.getKey(item.Key), Value: val, TTL: expiration, + SetNX: item.CacheActionOpts.DisableOverwrite, }) } func (r *redisCache) Get(key string, obj interface{}) error { var data []byte err := r.cache.Get(context.TODO(), r.getKey(key), &data) - if err == rediscache.ErrCacheMiss { + if errors.Is(err, rediscache.ErrCacheMiss) { err = ErrCacheMiss } if err != nil { @@ -172,7 +183,7 @@ func (rh *redisHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook { startTime := time.Now() err := next(ctx, cmd) - rh.registry.IncRedisRequest(err != nil && err != redis.Nil) + rh.registry.IncRedisRequest(err != nil && !errors.Is(err, redis.Nil)) rh.registry.ObserveRedisRequestDuration(time.Since(startTime)) return err diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/cache/twolevelclient.go b/vendor/github.com/argoproj/argo-cd/v2/util/cache/twolevelclient.go index 14a4279e87..f221099844 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/cache/twolevelclient.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/cache/twolevelclient.go @@ -18,6 +18,14 @@ type twoLevelClient struct { externalCache CacheClient } +func (c *twoLevelClient) Rename(oldKey string, newKey string, expiration time.Duration) error { + err := c.inMemoryCache.Rename(oldKey, newKey, expiration) + if err != nil { + log.Warnf("Failed to move key '%s' in in-memory cache: %v", oldKey, err) + } + return c.externalCache.Rename(oldKey, newKey, expiration) +} + // Set stores the given value in both in-memory and external cache. // Skip storing the value in external cache if the same value already exists in memory to avoid requesting external cache. 
func (c *twoLevelClient) Set(item *Item) error { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/cert/cert.go b/vendor/github.com/argoproj/argo-cd/v2/util/cert/cert.go index 3826c72b7d..4fe3929966 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/cert/cert.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/cert/cert.go @@ -271,8 +271,8 @@ func TokenizedDataToPublicKey(hostname string, subType string, rawKeyData string // Returns the requested pattern with all possible square brackets escaped func nonBracketedPattern(pattern string) string { - ret := strings.Replace(pattern, "[", `\[`, -1) - return strings.Replace(ret, "]", `\]`, -1) + ret := strings.ReplaceAll(pattern, "[", `\[`) + return strings.ReplaceAll(ret, "]", `\]`) } // We do not use full fledged regular expression for matching the hostname. diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/config/env.go b/vendor/github.com/argoproj/argo-cd/v2/util/config/env.go index b6679bca7e..d2007fba6a 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/config/env.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/config/env.go @@ -1,8 +1,10 @@ package config import ( + "encoding/csv" "errors" "os" + "strconv" "strings" "github.com/kballard/go-shellquote" @@ -46,8 +48,8 @@ func loadFlags() error { // pkg shellquota doesn't recognize `=` so that the opts in format `foo=bar` could not work. // issue ref: https://github.com/argoproj/argo-cd/issues/6822 for k, v := range flags { - if strings.Contains(k, "=") && strings.Count(k, "=") == 1 && v == "true" { - kv := strings.Split(k, "=") + if strings.Contains(k, "=") && v == "true" { + kv := strings.SplitN(k, "=", 2) actualKey, actualValue := kv[0], kv[1] if _, ok := flags[actualKey]; !ok { flags[actualKey] = actualValue @@ -68,3 +70,34 @@ func GetFlag(key, fallback string) string { func GetBoolFlag(key string) bool { return GetFlag(key, "false") == "true" } + +func GetIntFlag(key string, fallback int) int { + val, ok := flags[key] + if !ok { + return fallback + } + + v, err := strconv.Atoi(val) + if err != nil { + log.Fatal(err) + } + return v +} + +func GetStringSliceFlag(key string, fallback []string) []string { + val, ok := flags[key] + if !ok { + return fallback + } + + if val == "" { + return []string{} + } + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + v, err := csvReader.Read() + if err != nil { + log.Fatal(err) + } + return v +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/config/reader.go b/vendor/github.com/argoproj/argo-cd/v2/util/config/reader.go index 61df13b0a3..9674a2ef5b 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/config/reader.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/config/reader.go @@ -38,7 +38,7 @@ func unmarshalObject(data []byte, obj interface{}) error { func MarshalLocalYAMLFile(path string, obj interface{}) error { yamlData, err := yaml.Marshal(obj) if err == nil { - err = os.WriteFile(path, yamlData, 0600) + err = os.WriteFile(path, yamlData, 0o600) } return err } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/env/env.go b/vendor/github.com/argoproj/argo-cd/v2/util/env/env.go index 6808f59d2d..e9c2ff41d3 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/env/env.go +++ 
b/vendor/github.com/argoproj/argo-cd/v2/util/env/env.go
@@ -96,6 +96,33 @@ func ParseFloatFromEnv(env string, defaultValue, min, max float32) float32 {
	return float32(num)
}
+// Helper function to parse a float64 from an environment variable. Returns a
+// default if env is not set, is not parseable to a number, exceeds max (if
+// max is greater than 0) or is less than min (and min is greater than 0).
+//
+// nolint:unparam
+func ParseFloat64FromEnv(env string, defaultValue, min, max float64) float64 {
+	str := os.Getenv(env)
+	if str == "" {
+		return defaultValue
+	}
+
+	num, err := strconv.ParseFloat(str, 64)
+	if err != nil {
+		log.Warnf("Could not parse '%s' as a float64 from environment %s", str, env)
+		return defaultValue
+	}
+	if num < min {
+		log.Warnf("Value in %s is %f, which is less than minimum %f allowed", env, num, min)
+		return defaultValue
+	}
+	if num > max {
+		log.Warnf("Value in %s is %f, which is greater than maximum %f allowed", env, num, max)
+		return defaultValue
+	}
+	return num
+}
+
// Helper function to parse a time duration from an environment variable. Returns a
// default if env is not set, is not parseable to a duration, exceeds max (if
// max is greater than 0) or is less than min.
@@ -141,7 +168,7 @@ func StringFromEnv(env string, defaultValue string, opts ...StringFromEnvOpts) s
}
// StringsFromEnv parses given value from the environment as a list of strings,
-// using seperator as the delimeter, and returns them as a slice. The strings
+// using separator as the delimiter, and returns them as a slice. The strings
// in the returned slice will have leading and trailing white space removed.
func StringsFromEnv(env string, defaultValue []string, separator string) []string {
	if str := os.Getenv(env); str != "" {
@@ -168,3 +195,30 @@ func ParseBoolFromEnv(envVar string, defaultValue bool) bool {
	}
	return defaultValue
}
+
+// ParseStringToStringFromEnv parses given value from the environment as a map of string.
+// Returns default value if envVar is not set.
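+// An illustrative call (editor's sketch; the env var name EXAMPLE_LABELS is hypothetical):
+//
+//	_ = os.Setenv("EXAMPLE_LABELS", "team=infra, env=prod")
+//	m := ParseStringToStringFromEnv("EXAMPLE_LABELS", nil, ",")
+//	// m == map[string]string{"team": "infra", "env": "prod"}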
+func ParseStringToStringFromEnv(envVar string, defaultValue map[string]string, separator string) map[string]string { + str := os.Getenv(envVar) + str = strings.TrimSpace(str) + if str == "" { + return defaultValue + } + + parsed := make(map[string]string) + for _, pair := range strings.Split(str, separator) { + keyvalue := strings.Split(pair, "=") + if len(keyvalue) != 2 { + log.Warnf("Invalid key-value pair when parsing environment '%s' as a string map", str) + return defaultValue + } + key := strings.TrimSpace(keyvalue[0]) + value := strings.TrimSpace(keyvalue[1]) + if _, ok := parsed[key]; ok { + log.Warnf("Duplicate key '%s' when parsing environment '%s' as a string map", key, str) + return defaultValue + } + parsed[key] = value + } + return parsed +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/git/client.go b/vendor/github.com/argoproj/argo-cd/v2/util/git/client.go index 6b8587c0b3..386d8c1110 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/git/client.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/git/client.go @@ -2,6 +2,7 @@ package git import ( "crypto/tls" + "errors" "fmt" "math" "net/http" @@ -16,6 +17,8 @@ import ( "syscall" "time" + "github.com/Masterminds/semver/v3" + argoexec "github.com/argoproj/pkg/exec" "github.com/bmatcuk/doublestar/v4" "github.com/go-git/go-git/v5" @@ -24,6 +27,7 @@ import ( "github.com/go-git/go-git/v5/plumbing/transport" githttp "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/go-git/go-git/v5/storage/memory" + "github.com/google/uuid" log "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/knownhosts" @@ -37,6 +41,8 @@ import ( "github.com/argoproj/argo-cd/v2/util/proxy" ) +//go:generate go run github.com/vektra/mockery/v2@v2.40.2 --name=Client + var ErrInvalidRepoURL = fmt.Errorf("repo URL is invalid") type RevisionMetadata struct { @@ -55,7 +61,8 @@ type Refs struct { type gitRefCache interface { SetGitReferences(repo string, references []*plumbing.Reference) error - GetGitReferences(repo string, references *[]*plumbing.Reference) error + GetOrLockGitReferences(repo string, lockId string, references *[]*plumbing.Reference) (string, error) + UnlockGitReferences(repo string, lockId string) error } // Client is a generic git client interface @@ -73,6 +80,8 @@ type Client interface { RevisionMetadata(revision string) (*RevisionMetadata, error) VerifyCommitSignature(string) (string, error) IsAnnotatedTag(string) bool + ChangedFiles(revision string, targetRevision string) ([]string, error) + IsRevisionPresent(revision string) bool } type EventHandlers struct { @@ -126,7 +135,6 @@ func init() { maxRetryDuration = env.ParseDurationFromEnv(common.EnvGitRetryMaxDuration, common.DefaultGitRetryMaxDuration, 0, math.MaxInt64) retryDuration = env.ParseDurationFromEnv(common.EnvGitRetryDuration, common.DefaultGitRetryDuration, 0, math.MaxInt64) factor = env.ParseInt64FromEnv(common.EnvGitRetryFactor, common.DefaultGitRetryFactor, 0, math.MaxInt64) - } type ClientOpts func(c *nativeGitClient) @@ -174,6 +182,8 @@ func NewClientExt(rawRepoURL string, root string, creds Creds, insecure bool, en return client, nil } +var gitClientTimeout = env.ParseDurationFromEnv("ARGOCD_GIT_REQUEST_TIMEOUT", 15*time.Second, 0, math.MaxInt64) + // Returns a HTTP 
client object suitable for go-git to use using the following // pattern: // - If insecure is true, always returns a client with certificate verification @@ -184,9 +194,9 @@ func NewClientExt(rawRepoURL string, root string, creds Creds, insecure bool, en // - Otherwise (and on non-fatal errors), a default HTTP client is returned. func GetRepoHTTPClient(repoURL string, insecure bool, creds Creds, proxyURL string) *http.Client { // Default HTTP client - var customHTTPClient = &http.Client{ - // 15 second timeout - Timeout: 15 * time.Second, + customHTTPClient := &http.Client{ + // 15 second timeout by default + Timeout: gitClientTimeout, // don't follow redirect CheckRedirect: func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse @@ -310,15 +320,15 @@ func (m *nativeGitClient) Init() error { if err == nil { return nil } - if err != git.ErrRepositoryNotExists { + if !errors.Is(err, git.ErrRepositoryNotExists) { return err } log.Infof("Initializing %s to %s", m.repoURL, m.root) err = os.RemoveAll(m.root) if err != nil { - return fmt.Errorf("unable to clean repo at %s: %v", m.root, err) + return fmt.Errorf("unable to clean repo at %s: %w", m.root, err) } - err = os.MkdirAll(m.root, 0755) + err = os.MkdirAll(m.root, 0o755) if err != nil { return err } @@ -348,6 +358,21 @@ func (m *nativeGitClient) fetch(revision string) error { return err } +// IsRevisionPresent checks to see if the given revision already exists locally. +func (m *nativeGitClient) IsRevisionPresent(revision string) bool { + if revision == "" { + return false + } + + cmd := exec.Command("git", "cat-file", "-t", revision) + out, err := m.runCmdOutput(cmd, runOpts{SkipErrorLogging: true}) + if out == "commit" && err == nil { + return true + } else { + return false + } +} + // Fetch fetches latest updates from origin func (m *nativeGitClient) Fetch(revision string) error { if m.OnFetch != nil { @@ -473,11 +498,36 @@ func (m *nativeGitClient) Checkout(revision string, submoduleEnabled bool) error } func (m *nativeGitClient) getRefs() ([]*plumbing.Reference, error) { + myLockUUID, err := uuid.NewRandom() + myLockId := "" + if err != nil { + log.Debug("Error generating git references cache lock id: ", err) + } else { + myLockId = myLockUUID.String() + } + // Prevent an additional get call to cache if we know our state isn't stale + needsUnlock := true if m.gitRefCache != nil && m.loadRefFromCache { var res []*plumbing.Reference - if m.gitRefCache.GetGitReferences(m.repoURL, &res) == nil { + foundLockId, err := m.gitRefCache.GetOrLockGitReferences(m.repoURL, myLockId, &res) + isLockOwner := myLockId == foundLockId + if !isLockOwner && err == nil { + // Valid value already in cache return res, nil + } else if !isLockOwner && err != nil { + // Error getting value from cache + log.Debugf("Error getting git references from cache: %v", err) + return nil, err } + // Defer a soft reset of the cache lock, if the value is set this call will be ignored + defer func() { + if needsUnlock { + err := m.gitRefCache.UnlockGitReferences(m.repoURL, myLockId) + if err != nil { + log.Debugf("Error unlocking git references from cache: %v", err) + } + } + }() } if m.OnLsRemote != nil { @@ -504,6 +554,9 @@ func (m *nativeGitClient) getRefs() ([]*plumbing.Reference, error) { if err == nil && m.gitRefCache != nil { if err := m.gitRefCache.SetGitReferences(m.repoURL, res); err != nil { log.Warnf("Failed to store git references to cache: %v", err) + } else { + // Since we successfully overwrote the lock with valid data, we don't need 
to unlock + needsUnlock = false } return res, nil } @@ -512,7 +565,6 @@ func (m *nativeGitClient) getRefs() ([]*plumbing.Reference, error) { func (m *nativeGitClient) LsRefs() (*Refs, error) { refs, err := m.getRefs() - if err != nil { return nil, err } @@ -539,11 +591,11 @@ func (m *nativeGitClient) LsRefs() (*Refs, error) { return sortedRefs, nil } -// LsRemote resolves the commit SHA of a specific branch, tag, or HEAD. If the supplied revision -// does not resolve, and "looks" like a 7+ hexadecimal commit SHA, it return the revision string. -// Otherwise, it returns an error indicating that the revision could not be resolved. This method -// runs with in-memory storage and is safe to run concurrently, or to be run without a git -// repository locally cloned. +// LsRemote resolves the commit SHA of a specific branch, tag (with semantic versioning or not), +// or HEAD. If the supplied revision does not resolve, and "looks" like a 7+ hexadecimal commit SHA, +// it will return the revision string. Otherwise, it returns an error indicating that the revision could +// not be resolved. This method runs with in-memory storage and is safe to run concurrently, +// or to be run without a git repository locally cloned. func (m *nativeGitClient) LsRemote(revision string) (res string, err error) { for attempt := 0; attempt < maxAttemptsCount; attempt++ { res, err = m.lsRemote(revision) @@ -570,26 +622,34 @@ func (m *nativeGitClient) lsRemote(revision string) (string, error) { } refs, err := m.getRefs() - if err != nil { return "", err } + if revision == "" { revision = "HEAD" } + + semverSha := m.resolveSemverRevision(revision, refs) + if semverSha != "" { + return semverSha, nil + } + // refToHash keeps a maps of remote refs to their hash // (e.g. refs/heads/master -> a67038ae2e9cb9b9b16423702f98b41e36601001) refToHash := make(map[string]string) + // refToResolve remembers ref name of the supplied revision if we determine the revision is a // symbolic reference (like HEAD), in which case we will resolve it from the refToHash map refToResolve := "" + for _, ref := range refs { refName := ref.Name().String() hash := ref.Hash().String() if ref.Type() == plumbing.HashReference { refToHash[refName] = hash } - //log.Debugf("%s\t%s", hash, refName) + // log.Debugf("%s\t%s", hash, refName) if ref.Name().Short() == revision || refName == revision { if ref.Type() == plumbing.HashReference { log.Debugf("revision '%s' resolved to '%s'", revision, hash) @@ -600,6 +660,7 @@ func (m *nativeGitClient) lsRemote(revision string) (string, error) { } } } + if refToResolve != "" { // If refToResolve is non-empty, we are resolving symbolic reference (e.g. HEAD). // It should exist in our refToHash map @@ -608,14 +669,69 @@ func (m *nativeGitClient) lsRemote(revision string) (string, error) { return hash, nil } } + // We support the ability to use a truncated commit-SHA (e.g. first 7 characters of a SHA) if IsTruncatedCommitSHA(revision) { log.Debugf("revision '%s' assumed to be commit sha", revision) return revision, nil } + // If we get here, revision string had non hexadecimal characters (indicating its a branch, tag, // or symbolic ref) and we were unable to resolve it to a commit SHA. - return "", fmt.Errorf("Unable to resolve '%s' to a commit SHA", revision) + return "", fmt.Errorf("unable to resolve '%s' to a commit SHA", revision) +} + +// resolveSemverRevision is a part of the lsRemote method workflow. 
+// When the user correctly configures the Git repository revision, and that revision is a valid semver constraint, we +// use this logic path rather than the standard lsRemote revision resolution loop. +// Some examples to illustrate the actual behavior - if the revision is: +// * "v0.1.2"/"0.1.2" or "v0.1"/"0.1", then this is not a constraint, it's a pinned version - so we fall back to the standard tag matching in the lsRemote loop. +// * "v0.1.*"/"0.1.*", and there's a tag matching that constraint, then we find the latest matching version and return its commit hash. +// * "v0.1.*"/"0.1.*", and there is *no* tag matching that constraint, then we fall back to the standard tag matching in the lsRemote loop. +// * "custom-tag", only the lsRemote loop will run - because that revision is an invalid semver; +// * "master-branch", only the lsRemote loop will run because that revision is an invalid semver; +func (m *nativeGitClient) resolveSemverRevision(revision string, refs []*plumbing.Reference) string { + if _, err := semver.NewVersion(revision); err == nil { + // If the revision is a valid version, then we know it isn't a constraint; it's just a pin. + // In which case, we should use standard tag resolution mechanisms. + return "" + } + + constraint, err := semver.NewConstraint(revision) + if err != nil { + log.Debugf("Revision '%s' is not a valid semver constraint, skipping semver resolution.", revision) + return "" + } + + maxVersion := semver.New(0, 0, 0, "", "") + maxVersionHash := plumbing.ZeroHash + for _, ref := range refs { + if !ref.Name().IsTag() { + continue + } + + tag := ref.Name().Short() + version, err := semver.NewVersion(tag) + if err != nil { + log.Debugf("Error parsing version for tag: '%s': %v", tag, err) + // Skip this tag and continue to the next one + continue + } + + if constraint.Check(version) { + if version.GreaterThan(maxVersion) { + maxVersion = version + maxVersionHash = ref.Hash() + } + } + } + + if maxVersionHash.IsZero() { + return "" + } + + log.Debugf("Semver constraint '%s' resolved to tag '%s', at reference '%s'", revision, maxVersion.Original(), maxVersionHash.String()) + return maxVersionHash.String() } // CommitSHA returns current commit sha from `git rev-parse HEAD` @@ -654,7 +770,8 @@ func (m *nativeGitClient) RevisionMetadata(revision string) (*RevisionMetadata, func (m *nativeGitClient) VerifyCommitSignature(revision string) (string, error) { out, err := m.runGnuPGWrapper("git-verify-wrapper.sh", revision) if err != nil { - return "", err + log.Errorf("error verifying commit signature: %v", err) + return "", fmt.Errorf("permission denied") } return out, nil } @@ -670,6 +787,29 @@ func (m *nativeGitClient) IsAnnotatedTag(revision string) bool { } } +// ChangedFiles returns a list of files changed between two revisions +func (m *nativeGitClient) ChangedFiles(revision string, targetRevision string) ([]string, error) { + if revision == targetRevision { + return []string{}, nil + } + + if !IsCommitSHA(revision) || !IsCommitSHA(targetRevision) { + return []string{}, fmt.Errorf("invalid revision provided, must be SHA") + } + + out, err := m.runCmd("diff", "--name-only", fmt.Sprintf("%s..%s", revision, targetRevision)) + if err != nil { + return nil, fmt.Errorf("failed to diff %s..%s: %w", revision, targetRevision, err) + } + + if out == "" { + return []string{}, nil + } + + files := strings.Split(out, "\n") + return files, nil +} + // runWrapper runs a custom command with all the semantics of running the Git client func (m *nativeGitClient) 
runGnuPGWrapper(wrapper string, args ...string) (string, error) { cmd := exec.Command(wrapper, args...) @@ -737,7 +877,6 @@ func (m *nativeGitClient) runCmdOutput(cmd *exec.Cmd, ropts runOpts) (string, er } } } - cmd.Env = proxy.UpsertEnv(cmd, m.proxy) opts := executil.ExecRunOpts{ TimeoutBehavior: argoexec.TimeoutBehavior{ diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/git/creds.go b/vendor/github.com/argoproj/argo-cd/v2/util/git/creds.go index c3d09574ee..4341f40ce8 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/git/creds.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/git/creds.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "io" + "net/url" "os" "strconv" "strings" @@ -56,8 +57,7 @@ func init() { googleCloudTokenSource = gocache.New(gocache.NoExpiration, 0) } -type NoopCredsStore struct { -} +type NoopCredsStore struct{} func (d NoopCredsStore) Add(username string, password string) string { return "" @@ -85,8 +85,7 @@ func getGitAskPassEnv(id string) []string { } // nop implementation -type NopCloser struct { -} +type NopCloser struct{} func (c NopCloser) Close() error { return nil @@ -94,8 +93,7 @@ func (c NopCloser) Close() error { var _ Creds = NopCreds{} -type NopCreds struct { -} +type NopCreds struct{} func (c NopCreds) Environ() (io.Closer, []string, error) { return NopCloser{}, nil, nil @@ -241,10 +239,11 @@ type SSHCreds struct { caPath string insecure bool store CredsStore + proxy string } -func NewSSHCreds(sshPrivateKey string, caPath string, insecureIgnoreHostKey bool, store CredsStore) SSHCreds { - return SSHCreds{sshPrivateKey, caPath, insecureIgnoreHostKey, store} +func NewSSHCreds(sshPrivateKey string, caPath string, insecureIgnoreHostKey bool, store CredsStore, proxy string) SSHCreds { + return SSHCreds{sshPrivateKey, caPath, insecureIgnoreHostKey, store, proxy} } type sshPrivateKeyFile string @@ -275,6 +274,9 @@ func (c SSHCreds) Environ() (io.Closer, []string, error) { if err != nil { return nil, nil, err } + + sshCloser := sshPrivateKeyFile(file.Name()) + defer func() { if err = file.Close(); err != nil { log.WithFields(log.Fields{ @@ -286,6 +288,7 @@ func (c SSHCreds) Environ() (io.Closer, []string, error) { _, err = file.WriteString(c.sshPrivateKey + "\n") if err != nil { + sshCloser.Close() return nil, nil, err } @@ -303,8 +306,27 @@ func (c SSHCreds) Environ() (io.Closer, []string, error) { knownHostsFile := certutil.GetSSHKnownHostsDataPath() args = append(args, "-o", "StrictHostKeyChecking=yes", "-o", fmt.Sprintf("UserKnownHostsFile=%s", knownHostsFile)) } + // Handle SSH socks5 proxy settings + proxyEnv := []string{} + if c.proxy != "" { + parsedProxyURL, err := url.Parse(c.proxy) + if err != nil { + sshCloser.Close() + return nil, nil, fmt.Errorf("failed to set environment variables related to socks5 proxy, could not parse proxy URL '%s': %w", c.proxy, err) + } + args = append(args, "-o", fmt.Sprintf("ProxyCommand='connect-proxy -S %s:%s -5 %%h %%p'", + parsedProxyURL.Hostname(), + parsedProxyURL.Port())) + if parsedProxyURL.User != nil { + proxyEnv = append(proxyEnv, fmt.Sprintf("SOCKS5_USER=%s", parsedProxyURL.User.Username())) + if socks5_passwd, isPasswdSet := parsedProxyURL.User.Password(); isPasswdSet { + proxyEnv = append(proxyEnv, fmt.Sprintf("SOCKS5_PASSWD=%s", socks5_passwd)) + } + } + } env = append(env, []string{fmt.Sprintf("GIT_SSH_COMMAND=%s", strings.Join(args, " "))}...) - return sshPrivateKeyFile(file.Name()), env, nil + env = append(env, proxyEnv...) 
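+	// Editor's note (illustrative): for a proxy of socks5://user:pass@host:1080,
+	// GIT_SSH_COMMAND above gains -o ProxyCommand='connect-proxy -S host:1080 -5 %h %p'
+	// and proxyEnv carries SOCKS5_USER=user and SOCKS5_PASSWD=pass for connect-proxy.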
+ return sshCloser, env, nil } // GitHubAppCreds to authenticate as GitHub application @@ -383,7 +405,6 @@ func (g GitHubAppCreds) Environ() (io.Closer, []string, error) { } // GIT_SSL_KEY is the full path to a client certificate's key to be used env = append(env, fmt.Sprintf("GIT_SSL_KEY=%s", keyFile.Name())) - } nonce := g.store.Add(githubAccessTokenUsername, token) env = append(env, getGitAskPassEnv(nonce)...) diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/git/git.go b/vendor/github.com/argoproj/argo-cd/v2/util/git/git.go index d5a8652f7c..da33432690 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/git/git.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/git/git.go @@ -35,11 +35,21 @@ func IsTruncatedCommitSHA(sha string) bool { // SameURL returns whether or not the two repository URLs are equivalent in location func SameURL(leftRepo, rightRepo string) bool { - normalLeft := NormalizeGitURL(leftRepo) - normalRight := NormalizeGitURL(rightRepo) + normalLeft := NormalizeGitURLAllowInvalid(leftRepo) + normalRight := NormalizeGitURLAllowInvalid(rightRepo) return normalLeft != "" && normalRight != "" && normalLeft == normalRight } +// Similar to NormalizeGitURL, except returning an original url if the url is invalid. +// Needed to allow a deletion of repos with invalid urls. See https://github.com/argoproj/argo-cd/issues/20921. +func NormalizeGitURLAllowInvalid(repo string) string { + normalized := NormalizeGitURL(repo) + if normalized == "" { + return repo + } + return normalized +} + // NormalizeGitURL normalizes a git URL for purposes of comparison, as well as preventing redundant // local clones (by normalizing various forms of a URL to a consistent location). // Prefer using SameURL() over this function when possible. This algorithm may change over time diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/git/workaround.go b/vendor/github.com/argoproj/argo-cd/v2/util/git/workaround.go index c364c093c8..47636125cf 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/git/workaround.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/git/workaround.go @@ -1,6 +1,9 @@ package git import ( + "fmt" + neturl "net/url" + "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/transport" @@ -30,6 +33,23 @@ func newClient(url string, insecure bool, creds Creds, proxy string) (transport. 
if !IsHTTPSURL(url) && !IsHTTPURL(url) {
	// use the default client for protocols other than HTTP/HTTPS
+	ep.InsecureSkipTLS = insecure
+	if proxy != "" {
+		parsedProxyURL, err := neturl.Parse(proxy)
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed to create client for url '%s', error parsing proxy url '%s': %w", url, proxy, err)
+		}
+		var proxyUsername, proxyPasswd string
+		if parsedProxyURL.User != nil {
+			proxyUsername = parsedProxyURL.User.Username()
+			proxyPasswd, _ = parsedProxyURL.User.Password()
+		}
+		ep.Proxy = transport.ProxyOptions{
+			URL:      fmt.Sprintf("%s://%s:%s", parsedProxyURL.Scheme, parsedProxyURL.Hostname(), parsedProxyURL.Port()),
+			Username: proxyUsername,
+			Password: proxyPasswd,
+		}
+	}
	c, err := client.NewClient(ep)
	if err != nil {
		return nil, nil, err
diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/glob/list.go b/vendor/github.com/argoproj/argo-cd/v2/util/glob/list.go
index 1a6a8732ad..f257302af9 100644
--- a/vendor/github.com/argoproj/argo-cd/v2/util/glob/list.go
+++ b/vendor/github.com/argoproj/argo-cd/v2/util/glob/list.go
@@ -1,10 +1,30 @@
package glob
-// MatchStringInList will return true if item is contained in list. If
-// exactMatch is set to false, list may contain globs to be matched.
-func MatchStringInList(list []string, item string, exactMatch bool) bool {
+import (
+	"strings"
+
+	"github.com/argoproj/argo-cd/v2/util/regex"
+)
+
+const (
+	EXACT  = "exact"
+	GLOB   = "glob"
+	REGEXP = "regexp"
+)
+
+// MatchStringInList will return true if item is contained in list.
+// patternMatch can be set to exact, glob, or regexp.
+// If patternMatch is set to exact, the item must be an exact match.
+// If patternMatch is set to glob, the item must match a glob pattern.
+// If patternMatch is set to regexp, the item must match a regular expression or glob.
+func MatchStringInList(list []string, item string, patternMatch string) bool {
	for _, ll := range list {
-		if item == ll || (!exactMatch && Match(ll, item)) {
+		// If string is wrapped in "/", assume it is a regular expression.
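+		// Editor's illustration of the matching modes (not part of the change):
+		//
+		//	MatchStringInList([]string{"/^dev-[0-9]+$/"}, "dev-42", REGEXP) // true via the regexp branch
+		//	MatchStringInList([]string{"dev-*"}, "dev-42", GLOB)            // true via glob matching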
+ if patternMatch == REGEXP && strings.HasPrefix(ll, "/") && strings.HasSuffix(ll, "/") && regex.Match(ll[1:len(ll)-1], item) { + return true + } else if (patternMatch == REGEXP || patternMatch == GLOB) && Match(ll, item) { + return true + } else if patternMatch == EXACT && item == ll { return true } } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/errors.go b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/errors.go index 33f992bc0f..f095ad78f4 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/errors.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/errors.go @@ -19,7 +19,7 @@ func gitErrToGRPC(err error) error { if err == nil { return err } - var errMsg = err.Error() + errMsg := err.Error() if grpcStatus := UnwrapGRPCStatus(err); grpcStatus != nil { errMsg = grpcStatus.Message() } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/grpc.go b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/grpc.go index 93b9556d7c..e3d99672eb 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/grpc.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/grpc.go @@ -3,12 +3,12 @@ package grpc import ( "context" "crypto/tls" + "fmt" "net" "runtime/debug" "strings" "time" - "github.com/argoproj/argo-cd/v2/common" "github.com/sirupsen/logrus" "golang.org/x/net/proxy" "google.golang.org/grpc" @@ -17,6 +17,8 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" + + "github.com/argoproj/argo-cd/v2/common" ) // PanicLoggerUnaryServerInterceptor returns a new unary server interceptor for recovering from panics and returning error @@ -63,17 +65,16 @@ func BlockingDial(ctx context.Context, network, address string, creds credential } dialer := func(ctx context.Context, address string) (net.Conn, error) { - conn, err := proxy.Dial(ctx, network, address) if err != nil { writeResult(err) - return nil, err + return nil, fmt.Errorf("error dial proxy: %w", err) } if creds != nil { conn, _, err = creds.ClientHandshake(ctx, address, conn) if err != nil { writeResult(err) - return nil, err + return nil, fmt.Errorf("error creating connection: %w", err) } } return conn, nil @@ -89,7 +90,7 @@ func BlockingDial(ctx context.Context, network, address string, creds credential grpc.FailOnNonTempDialError(true), grpc.WithContextDialer(dialer), grpc.WithTransportCredentials(insecure.NewCredentials()), // we are handling TLS, so tell grpc not to - grpc.WithKeepaliveParams(keepalive.ClientParameters{Time: common.GRPCKeepAliveTime}), + grpc.WithKeepaliveParams(keepalive.ClientParameters{Time: common.GetGRPCKeepAliveTime()}), ) conn, err := grpc.DialContext(ctx, address, opts...) 
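	// Editor's note (not part of the change): when creds is non-nil the TLS
	// handshake happens inside the custom dialer above, which is why transport
	// security is disabled on this DialContext call. A hypothetical caller sketch:
	//
	//	creds := credentials.NewTLS(&tls.Config{MinVersion: tls.VersionTLS12})
	//	conn, err := grpcutil.BlockingDial(ctx, "tcp", "argocd-repo-server:8081", creds)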
var res interface{}
diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/logging.go b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/logging.go
index b9ede33d1b..37dfc286ca 100644
--- a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/logging.go
+++ b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/logging.go
@@ -46,7 +46,7 @@ func (j *jsonpbMarshalleble) MarshalJSON() ([]byte, error) {
	m := &jsonpb.Marshaler{}
	err := m.Marshal(&b, j.Message)
	if err != nil {
-		return nil, fmt.Errorf("jsonpb serializer failed: %v", err)
+		return nil, fmt.Errorf("jsonpb serializer failed: %w", err)
	}
	return b.Bytes(), nil
}
diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/sanitizer.go b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/sanitizer.go
index ffad63ba9f..36739e7def 100644
--- a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/sanitizer.go
+++ b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/sanitizer.go
@@ -1,12 +1,11 @@
package grpc
import (
+	"context"
	"errors"
	"regexp"
	"strings"
-	"context"
-
	"google.golang.org/grpc"
	"google.golang.org/grpc/status"
)
@@ -65,7 +64,7 @@ func NewSanitizer() *sanitizer {
// AddReplacement adds a replacement to the Sanitizer
func (s *sanitizer) AddReplacement(val string, replacement string) {
	s.replacers = append(s.replacers, func(in string) string {
-		return strings.Replace(in, val, replacement, -1)
+		return strings.ReplaceAll(in, val, replacement)
	})
}
diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/trace.go b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/trace.go
index 484e2b61dc..7ecc5bc964 100644
--- a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/trace.go
+++ b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/trace.go
@@ -17,8 +17,8 @@ var (
// see https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 for details
func ensureInitialized() {
	interceptorsInitialized.Do(func() {
-		otelUnaryInterceptor = otelgrpc.UnaryClientInterceptor()
-		otelStreamInterceptor = otelgrpc.StreamClientInterceptor()
+		otelUnaryInterceptor = otelgrpc.UnaryClientInterceptor() //nolint:staticcheck // TODO: ignore SA1019 for deprecation: see https://github.com/argoproj/argo-cd/issues/18258
+		otelStreamInterceptor = otelgrpc.StreamClientInterceptor() //nolint:staticcheck // TODO: ignore SA1019 for deprecation: see https://github.com/argoproj/argo-cd/issues/18258
	})
}
diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/useragent.go b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/useragent.go
index 28b37880a7..c71f5e1a8a 100644
--- a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/useragent.go
+++ b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/useragent.go
@@ -1,9 +1,9 @@
package grpc
import (
+	"context"
	"strings"
-	"context"
	"github.com/Masterminds/semver/v3"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go
index 8b99cd67c6..5179c57b48 100644
--- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go
+++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go
@@ -32,6 +32,8 @@
import ( "github.com/argoproj/argo-cd/v2/util/proxy" ) +//go:generate go run github.com/vektra/mockery/v2@v2.40.2 --name=Client + var ( globalLock = sync.NewKeyLock() indexLock = sync.NewKeyLock() @@ -54,8 +56,8 @@ type indexCache interface { } type Client interface { - CleanChartCache(chart string, version string) error - ExtractChart(chart string, version string, passCredentials bool, manifestMaxExtractedSize int64, disableManifestMaxExtractedSize bool) (string, argoio.Closer, error) + CleanChartCache(chart string, version string, project string) error + ExtractChart(chart string, version string, project string, passCredentials bool, manifestMaxExtractedSize int64, disableManifestMaxExtractedSize bool) (string, argoio.Closer, error) GetIndex(noCache bool, maxIndexSize int64) (*Index, error) GetTags(chart string, noCache bool) (*TagsList, error) TestHelmOCI() (bool, error) @@ -117,8 +119,8 @@ func fileExist(filePath string) (bool, error) { return true, nil } -func (c *nativeHelmChart) CleanChartCache(chart string, version string) error { - cachePath, err := c.getCachedChartPath(chart, version) +func (c *nativeHelmChart) CleanChartCache(chart string, version string, project string) error { + cachePath, err := c.getCachedChartPath(chart, version, project) if err != nil { return err } @@ -139,10 +141,9 @@ func untarChart(tempDir string, cachedChartPath string, manifestMaxExtractedSize return files.Untgz(tempDir, reader, manifestMaxExtractedSize, false) } -func (c *nativeHelmChart) ExtractChart(chart string, version string, passCredentials bool, manifestMaxExtractedSize int64, disableManifestMaxExtractedSize bool) (string, argoio.Closer, error) { +func (c *nativeHelmChart) ExtractChart(chart string, version string, project string, passCredentials bool, manifestMaxExtractedSize int64, disableManifestMaxExtractedSize bool) (string, argoio.Closer, error) { // always use Helm V3 since we don't have chart content to determine correct Helm version helmCmd, err := NewCmdWithVersion("", HelmV3, c.enableOci, c.proxy) - if err != nil { return "", nil, err } @@ -159,7 +160,7 @@ func (c *nativeHelmChart) ExtractChart(chart string, version string, passCredent return "", nil, err } - cachedChartPath, err := c.getCachedChartPath(chart, version) + cachedChartPath, err := c.getCachedChartPath(chart, version, project) if err != nil { return "", nil, err } @@ -236,7 +237,7 @@ func (c *nativeHelmChart) GetIndex(noCache bool, maxIndexSize int64) (*Index, er var data []byte if !noCache && c.indexCache != nil { - if err := c.indexCache.GetHelmIndex(c.repoURL, &data); err != nil && err != cache.ErrCacheMiss { + if err := c.indexCache.GetHelmIndex(c.repoURL, &data); err != nil && !errors.Is(err, cache.ErrCacheMiss) { log.Warnf("Failed to load index cache for repo: %s: %v", c.repoURL, err) } } @@ -374,8 +375,8 @@ func normalizeChartName(chart string) string { return nc } -func (c *nativeHelmChart) getCachedChartPath(chart string, version string) (string, error) { - keyData, err := json.Marshal(map[string]string{"url": c.repoURL, "chart": chart, "version": version}) +func (c *nativeHelmChart) getCachedChartPath(chart string, version string, project string) (string, error) { + keyData, err := json.Marshal(map[string]string{"url": c.repoURL, "chart": chart, "version": version, "project": project}) if err != nil { return "", err } @@ -414,7 +415,7 @@ func (c *nativeHelmChart) GetTags(chart string, noCache bool) (*TagsList, error) var data []byte if !noCache && c.indexCache != nil { - if 
err := c.indexCache.GetHelmIndex(tagsURL, &data); err != nil && err != cache.ErrCacheMiss { + if err := c.indexCache.GetHelmIndex(tagsURL, &data); err != nil && !errors.Is(err, cache.ErrCacheMiss) { log.Warnf("Failed to load index cache for repo: %s: %v", tagsURL, err) } } @@ -424,11 +425,11 @@ func (c *nativeHelmChart) GetTags(chart string, noCache bool) (*TagsList, error) start := time.Now() repo, err := remote.NewRepository(tagsURL) if err != nil { - return nil, fmt.Errorf("failed to initialize repository: %v", err) + return nil, fmt.Errorf("failed to initialize repository: %w", err) } tlsConf, err := newTLSConfig(c.creds) if err != nil { - return nil, fmt.Errorf("failed setup tlsConfig: %v", err) + return nil, fmt.Errorf("failed setup tlsConfig: %w", err) } client := &http.Client{Transport: &http.Transport{ Proxy: proxy.GetCallback(c.proxy), @@ -456,9 +457,8 @@ func (c *nativeHelmChart) GetTags(chart string, noCache bool) (*TagsList, error) return nil }) - if err != nil { - return nil, fmt.Errorf("failed to get tags: %v", err) + return nil, fmt.Errorf("failed to get tags: %w", err) } log.WithFields( log.Fields{"seconds": time.Since(start).Seconds(), "chart": chart, "repo": c.repoURL}, @@ -472,7 +472,7 @@ func (c *nativeHelmChart) GetTags(chart string, noCache bool) (*TagsList, error) } else { err := json.Unmarshal(data, tags) if err != nil { - return nil, fmt.Errorf("failed to decode tags: %v", err) + return nil, fmt.Errorf("failed to decode tags: %w", err) } } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go index cc2a1388d6..6edf949fb2 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go @@ -30,7 +30,6 @@ type Cmd struct { } func NewCmd(workDir string, version string, proxy string) (*Cmd, error) { - switch version { // If v3 is specified (or by default, if no value is specified) then use v3 case "", "v3": @@ -191,7 +190,7 @@ func writeToTmp(data []byte) (string, argoio.Closer, error) { if err != nil { return "", nil, err } - err = os.WriteFile(file.Name(), data, 0644) + err = os.WriteFile(file.Name(), data, 0o644) if err != nil { _ = os.RemoveAll(file.Name()) return "", nil, err @@ -253,10 +252,12 @@ func (c *Cmd) Fetch(repo, chartName, version, destination string, creds Creds, p } func (c *Cmd) PullOCI(repo string, chart string, version string, destination string, creds Creds) (string, error) { - args := []string{"pull", fmt.Sprintf("oci://%s/%s", repo, chart), "--version", + args := []string{ + "pull", fmt.Sprintf("oci://%s/%s", repo, chart), "--version", version, "--destination", - destination} + destination, + } if creds.CAPath != "" { args = append(args, "--ca-file", creds.CAPath) } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/helm.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/helm.go index f586691867..41f9a13afd 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/helm.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/helm.go @@ -98,7 +98,6 @@ func (h *helm) DependencyBuild() error { } } else { _, err := h.cmd.RepoAdd(repo.Name, repo.Repo, repo.Creds, h.passCredentials) - if err != nil { return err } @@ -130,7 +129,7 @@ func Version(shortForm bool) (string, error) { // short: "v3.3.1+g249e521" version, err := executil.RunWithRedactor(cmd, redactor) if err != 
nil { - return "", fmt.Errorf("could not get helm version: %s", err) + return "", fmt.Errorf("could not get helm version: %w", err) } return strings.TrimSpace(version), nil } @@ -160,7 +159,7 @@ func (h *helm) GetParameters(valuesFiles []pathutil.ResolvedFilePath, appPath, r fileValues, err = os.ReadFile(file) } if err != nil { - return nil, fmt.Errorf("failed to read value file %s: %s", file, err) + return nil, fmt.Errorf("failed to read value file %s: %w", file, err) } values = append(values, string(fileValues)) } @@ -169,7 +168,7 @@ func (h *helm) GetParameters(valuesFiles []pathutil.ResolvedFilePath, appPath, r for _, file := range values { values := map[string]interface{}{} if err := yaml.Unmarshal([]byte(file), &values); err != nil { - return nil, fmt.Errorf("failed to parse values: %s", err) + return nil, fmt.Errorf("failed to parse values: %w", err) } flatVals(values, output) } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/helmver.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/helmver.go index 8960964e5f..139c2f0c61 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/helmver.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/helmver.go @@ -5,21 +5,19 @@ import ( "path" ) -var ( - // HelmV3 represents helm V3 specific settings - HelmV3 = HelmVer{ - binaryName: "helm", - templateNameArg: "--name-template", - kubeVersionSupported: true, - showCommand: "show", - pullCommand: "pull", - initSupported: false, - getPostTemplateCallback: cleanupChartLockFile, - includeCrds: true, - insecureSkipVerifySupported: true, - helmPassCredentialsSupported: true, - } -) +// HelmV3 represents helm V3 specific settings +var HelmV3 = HelmVer{ + binaryName: "helm", + templateNameArg: "--name-template", + kubeVersionSupported: true, + showCommand: "show", + pullCommand: "pull", + initSupported: false, + getPostTemplateCallback: cleanupChartLockFile, + includeCrds: true, + insecureSkipVerifySupported: true, + helmPassCredentialsSupported: true, +} // workaround for Helm3 bug. Remove after https://github.com/helm/helm/issues/6870 is fixed. 
// The `helm template` command generates Chart.lock after which `helm dependency build` does not work diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/index.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/index.go index 8dcdb7bf33..2dca5041d4 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/index.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/index.go @@ -1,6 +1,7 @@ package helm import ( + "errors" "fmt" "time" @@ -33,13 +34,13 @@ func (e Entries) MaxVersion(constraints *semver.Constraints) (*semver.Version, e for _, entry := range e { v, err := semver.NewVersion(entry.Version) - //Invalid semantic version ignored - if err == semver.ErrInvalidSemVer { + // Invalid semantic version ignored + if errors.Is(err, semver.ErrInvalidSemVer) { log.Debugf("Invalid sementic version: %s", entry.Version) continue } if err != nil { - return nil, fmt.Errorf("invalid constraint in index: %v", err) + return nil, fmt.Errorf("invalid constraint in index: %w", err) } if constraints.Check(v) { versions = append(versions, v) diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/tags.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/tags.go index 656ff774b2..6cfa745fb3 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/tags.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/tags.go @@ -1,6 +1,7 @@ package helm import ( + "errors" "fmt" log "github.com/sirupsen/logrus" @@ -17,13 +18,13 @@ func (t TagsList) MaxVersion(constraints *semver.Constraints) (*semver.Version, for _, tag := range t.Tags { v, err := semver.NewVersion(tag) - //Invalid semantic version ignored - if err == semver.ErrInvalidSemVer { + // Invalid semantic version ignored + if errors.Is(err, semver.ErrInvalidSemVer) { log.Debugf("Invalid semantic version: %s", tag) continue } if err != nil { - return nil, fmt.Errorf("invalid constraint in tags: %v", err) + return nil, fmt.Errorf("invalid constraint in tags: %w", err) } if constraints.Check(v) { versions = append(versions, v) diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/http/http.go b/vendor/github.com/argoproj/argo-cd/v2/util/http/http.go new file mode 100644 index 0000000000..7c13c71fde --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/util/http/http.go @@ -0,0 +1,241 @@ +package http + +import ( + "bytes" + "fmt" + "io" + "math" + "net/http" + "net/http/httputil" + "strconv" + "strings" + "time" + + log "github.com/sirupsen/logrus" + "k8s.io/client-go/transport" + + "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/util/env" +) + +const ( + maxCookieLength = 4093 + + // limit size of the resp to 512KB + respReadLimit = int64(524288) + retryWaitMax = time.Duration(10) * time.Second + EnvRetryMax = "ARGOCD_K8SCLIENT_RETRY_MAX" + EnvRetryBaseBackoff = "ARGOCD_K8SCLIENT_RETRY_BASE_BACKOFF" +) + +// max number of chunks a cookie can be broken into. To be compatible with +// widest range of browsers, you shouldn't create more than 30 cookies per domain +var maxCookieNumber = env.ParseNumFromEnv(common.EnvMaxCookieNumber, 20, 0, math.MaxInt) + +// MakeCookieMetadata generates a string representing a Web cookie. Yum! 
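+// Editor's sketch of intended usage (flag values are illustrative):
+//
+//	cookies, err := MakeCookieMetadata("argocd.token", token, "path=/", "SameSite=lax")
+//	// a token longer than ~4k characters comes back chunked:
+//	// "argocd.token=2:<part0>", "argocd.token-1=<part1>", ...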
+func MakeCookieMetadata(key, value string, flags ...string) ([]string, error) { + attributes := strings.Join(flags, "; ") + + // cookie: name=value; attributes and key: key-(i) e.g. argocd.token-1 + maxValueLength := maxCookieValueLength(key, attributes) + numberOfCookies := int(math.Ceil(float64(len(value)) / float64(maxValueLength))) + if numberOfCookies > maxCookieNumber { + return nil, fmt.Errorf("the authentication token is %d characters long and requires %d cookies but the max number of cookies is %d. Contact your Argo CD administrator to increase the max number of cookies", len(value), numberOfCookies, maxCookieNumber) + } + + return splitCookie(key, value, attributes), nil +} + +// browser has limit on size of cookie, currently 4kb. In order to +// support cookies longer than 4kb, we split cookie into multiple 4kb chunks. +// first chunk will be of format argocd.token=:token; attributes +func splitCookie(key, value, attributes string) []string { + var cookies []string + valueLength := len(value) + // cookie: name=value; attributes and key: key-(i) e.g. argocd.token-1 + maxValueLength := maxCookieValueLength(key, attributes) + numberOfChunks := int(math.Ceil(float64(valueLength) / float64(maxValueLength))) + + var end int + for i, j := 0, 0; i < valueLength; i, j = i+maxValueLength, j+1 { + end = i + maxValueLength + if end > valueLength { + end = valueLength + } + + var cookie string + if j == 0 && numberOfChunks == 1 { + cookie = fmt.Sprintf("%s=%s", key, value[i:end]) + } else if j == 0 { + cookie = fmt.Sprintf("%s=%d:%s", key, numberOfChunks, value[i:end]) + } else { + cookie = fmt.Sprintf("%s-%d=%s", key, j, value[i:end]) + } + if attributes != "" { + cookie = fmt.Sprintf("%s; %s", cookie, attributes) + } + cookies = append(cookies, cookie) + } + return cookies +} + +// JoinCookies combines chunks of cookie based on key as prefix. It returns cookie +// value as string. 
cookieString is of format key1=value1; key2=value2; key3=value3 +// first chunk will be of format argocd.token=:token; attributes +func JoinCookies(key string, cookieList []*http.Cookie) (string, error) { + cookies := make(map[string]string) + for _, cookie := range cookieList { + if !strings.HasPrefix(cookie.Name, key) { + continue + } + cookies[cookie.Name] = cookie.Value + } + + var sb strings.Builder + var numOfChunks int + var err error + var token string + var ok bool + + if token, ok = cookies[key]; !ok { + return "", fmt.Errorf("failed to retrieve cookie %s", key) + } + parts := strings.Split(token, ":") + + if len(parts) == 2 { + if numOfChunks, err = strconv.Atoi(parts[0]); err != nil { + return "", err + } + sb.WriteString(parts[1]) + } else if len(parts) == 1 { + numOfChunks = 1 + sb.WriteString(parts[0]) + } else { + return "", fmt.Errorf("invalid cookie for key %s", key) + } + + for i := 1; i < numOfChunks; i++ { + sb.WriteString(cookies[fmt.Sprintf("%s-%d", key, i)]) + } + return sb.String(), nil +} + +func maxCookieValueLength(key, attributes string) int { + if len(attributes) > 0 { + return maxCookieLength - (len(key) + 3) - (len(attributes) + 2) + } + return maxCookieLength - (len(key) + 3) +} + +// DebugTransport is a HTTP Client Transport to enable debugging +type DebugTransport struct { + T http.RoundTripper +} + +func (d DebugTransport) RoundTrip(req *http.Request) (*http.Response, error) { + reqDump, err := httputil.DumpRequest(req, true) + if err != nil { + return nil, err + } + log.Printf("%s", reqDump) + + resp, err := d.T.RoundTrip(req) + if err != nil { + return nil, err + } + + respDump, err := httputil.DumpResponse(resp, true) + if err != nil { + _ = resp.Body.Close() + return nil, err + } + log.Printf("%s", respDump) + return resp, nil +} + +// TransportWithHeader is a HTTP Client Transport with default headers. 
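+//
+// A hedged usage sketch (token is an assumed variable): header values set here
+// are merged with, not replaced by, each outgoing request's own headers:
+//
+//	client := &http.Client{Transport: &TransportWithHeader{
+//		RoundTripper: http.DefaultTransport,
+//		Header:       http.Header{"Authorization": []string{"Bearer " + token}},
+//	}}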
+type TransportWithHeader struct { + RoundTripper http.RoundTripper + Header http.Header +} + +func (rt *TransportWithHeader) RoundTrip(r *http.Request) (*http.Response, error) { + if rt.Header != nil { + headers := rt.Header.Clone() + for k, vs := range r.Header { + for _, v := range vs { + headers.Add(k, v) + } + } + r.Header = headers + } + return rt.RoundTripper.RoundTrip(r) +} + +func WithRetry(maxRetries int64, baseRetryBackoff time.Duration) transport.WrapperFunc { + return func(rt http.RoundTripper) http.RoundTripper { + return &retryTransport{ + inner: rt, + maxRetries: maxRetries, + backoff: baseRetryBackoff, + } + } +} + +type retryTransport struct { + inner http.RoundTripper + maxRetries int64 + backoff time.Duration +} + +func isRetriable(resp *http.Response) bool { + if resp == nil { + return false + } + if resp.StatusCode == http.StatusTooManyRequests { + return true + } + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) { + return true + } + return false +} + +func (t *retryTransport) RoundTrip(req *http.Request) (*http.Response, error) { + var resp *http.Response + var err error + backoff := t.backoff + var bodyBytes []byte + if req.Body != nil { + bodyBytes, _ = io.ReadAll(req.Body) + } + for i := 0; i <= int(t.maxRetries); i++ { + req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) + resp, err = t.inner.RoundTrip(req) + if i < int(t.maxRetries) && (err != nil || isRetriable(resp)) { + if resp != nil && resp.Body != nil { + drainBody(resp.Body) + } + if backoff > retryWaitMax { + backoff = retryWaitMax + } + select { + case <-time.After(backoff): + case <-req.Context().Done(): + return nil, req.Context().Err() + } + backoff *= 2 + continue + } + break + } + return resp, err +} + +func drainBody(body io.ReadCloser) { + defer body.Close() + _, err := io.Copy(io.Discard, io.LimitReader(body, respReadLimit)) + if err != nil { + log.Warnf("error reading response body: %s", err.Error()) + } +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/io/closer.go b/vendor/github.com/argoproj/argo-cd/v2/util/io/closer.go index 7ca4981e96..2c9293024e 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/io/closer.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/io/closer.go @@ -2,11 +2,9 @@ package io import log "github.com/sirupsen/logrus" -var ( - NopCloser = NewCloser(func() error { - return nil - }) -) +var NopCloser = NewCloser(func() error { + return nil +}) type Closer interface { Close() error diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/io/files/tar.go b/vendor/github.com/argoproj/argo-cd/v2/util/io/files/tar.go index 13973f732f..344c84b71b 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/io/files/tar.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/io/files/tar.go @@ -45,7 +45,6 @@ func Tgz(srcPath string, inclusions []string, exclusions []string, writers ...io tarWriter: tw, } err := filepath.Walk(srcPath, t.tgzFile) - if err != nil { return 0, err } @@ -92,7 +91,7 @@ func Untgz(dstPath string, r io.Reader, maxSize int64, preserveFileMode bool) er switch header.Typeflag { case tar.TypeDir: - var mode os.FileMode = 0755 + var mode os.FileMode = 0o755 if preserveFileMode { mode = os.FileMode(header.Mode) } @@ -107,22 +106,22 @@ func Untgz(dstPath string, r io.Reader, maxSize int64, preserveFileMode bool) er if os.IsNotExist(err) { realPath = 
linkTarget } else if err != nil { - return fmt.Errorf("error checking symlink realpath: %s", err) + return fmt.Errorf("error checking symlink realpath: %w", err) } if !Inbound(realPath, dstPath) { return fmt.Errorf("illegal filepath in symlink: %s", linkTarget) } err = os.Symlink(realPath, target) if err != nil { - return fmt.Errorf("error creating symlink: %s", err) + return fmt.Errorf("error creating symlink: %w", err) } case tar.TypeReg: - var mode os.FileMode = 0644 + var mode os.FileMode = 0o644 if preserveFileMode { mode = os.FileMode(header.Mode) } - err := os.MkdirAll(filepath.Dir(target), 0755) + err := os.MkdirAll(filepath.Dir(target), 0o755) if err != nil { return fmt.Errorf("error creating nested folders: %w", err) } @@ -155,7 +154,7 @@ func (t *tgz) tgzFile(path string, fi os.FileInfo, err error) error { relativePath, err := RelativePath(path, t.srcPath) if err != nil { - return fmt.Errorf("relative path error: %s", err) + return fmt.Errorf("relative path error: %w", err) } if t.inclusions != nil && base != "." && !fi.IsDir() { @@ -197,7 +196,7 @@ func (t *tgz) tgzFile(path string, fi os.FileInfo, err error) error { if IsSymlink(fi) { link, err = os.Readlink(path) if err != nil { - return fmt.Errorf("error getting link target: %s", err) + return fmt.Errorf("error getting link target: %w", err) } } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/io/files/util.go b/vendor/github.com/argoproj/argo-cd/v2/util/io/files/util.go index 741f224c3c..deef3166fc 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/io/files/util.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/io/files/util.go @@ -57,11 +57,11 @@ func CreateTempDir(baseDir string) (string, error) { } newUUID, err := uuid.NewRandom() if err != nil { - return "", fmt.Errorf("error creating directory name: %s", err) + return "", fmt.Errorf("error creating directory name: %w", err) } tempDir := path.Join(base, newUUID.String()) - if err := os.MkdirAll(tempDir, 0755); err != nil { - return "", fmt.Errorf("error creating tempDir: %s", err) + if err := os.MkdirAll(tempDir, 0o755); err != nil { + return "", fmt.Errorf("error creating tempDir: %w", err) } return tempDir, nil } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/io/path/resolved.go b/vendor/github.com/argoproj/argo-cd/v2/util/io/path/resolved.go index d18df45e29..4ac5831c54 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/io/path/resolved.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/io/path/resolved.go @@ -1,6 +1,7 @@ package path import ( + "errors" "fmt" "net/url" "os" @@ -25,11 +26,11 @@ func resolveSymbolicLinkRecursive(path string, maxDepth int) (string, error) { resolved, err := os.Readlink(path) if err != nil { // path is not a symbolic link - _, ok := err.(*os.PathError) - if ok { + var pathErr *os.PathError + if errors.As(err, &pathErr) { return path, nil } - // Other error has occured + // Other error has occurred return "", err } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/kube/kube.go b/vendor/github.com/argoproj/argo-cd/v2/util/kube/kube.go index 5ea4394b72..afd48a9cd2 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/kube/kube.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/kube/kube.go @@ -161,6 +161,30 @@ func RemoveLabel(un *unstructured.Unstructured, key string) error { return nil } +// 
RemoveAnnotation removes annotation with the specified name +func RemoveAnnotation(un *unstructured.Unstructured, key string) error { + annotations, _, err := nestedNullableStringMap(un.Object, "metadata", "annotations") + if err != nil { + return fmt.Errorf("failed to get annotations for %s %s/%s: %w", un.GroupVersionKind().String(), un.GetNamespace(), un.GetName(), err) + } + if annotations == nil { + return nil + } + + for k := range annotations { + if k == key { + delete(annotations, k) + if len(annotations) == 0 { + un.SetAnnotations(nil) + } else { + un.SetAnnotations(annotations) + } + break + } + } + return nil +} + // nestedNullableStringMap returns a copy of map[string]string value of a nested field. // Returns false if value is not found and an error if not one of map[string]interface{} or nil, or contains non-string values in the map. func nestedNullableStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/kube/util.go b/vendor/github.com/argoproj/argo-cd/v2/util/kube/util.go index 19db8c817b..81c7c0d44c 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/kube/util.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/kube/util.go @@ -66,7 +66,6 @@ func (ku *kubeUtil) CreateOrUpdateSecret(ns string, name string, update updateFn } return err - } // CreateOrUpdateSecretField creates or updates a secret name in namespace ns, with given value for given field diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/log/logrus.go b/vendor/github.com/argoproj/argo-cd/v2/util/log/logrus.go index b52357498d..dd583fb5a3 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/log/logrus.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/log/logrus.go @@ -38,13 +38,14 @@ func CreateFormatter(logFormat string) logrus.Formatter { case JsonFormat: formatType = &logrus.JSONFormatter{} case TextFormat: - if os.Getenv("FORCE_LOG_COLORS") == "1" { - formatType = &logrus.TextFormatter{ForceColors: true} - } else { - formatType = &logrus.TextFormatter{} + formatType = &logrus.TextFormatter{ + ForceColors: checkForceLogColors(), + FullTimestamp: checkEnableFullTimestamp(), } default: - formatType = &logrus.TextFormatter{} + formatType = &logrus.TextFormatter{ + FullTimestamp: checkEnableFullTimestamp(), + } } return formatType @@ -57,3 +58,11 @@ func createLogLevel() logrus.Level { } return level } + +func checkForceLogColors() bool { + return strings.ToLower(os.Getenv("FORCE_LOG_COLORS")) == "1" +} + +func checkEnableFullTimestamp() bool { + return strings.ToLower(os.Getenv(common.EnvLogFormatEnableFullTimestamp)) == "1" +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/password/password.go b/vendor/github.com/argoproj/argo-cd/v2/util/password/password.go index 21ed21e11f..5544a4fc84 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/password/password.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/password/password.go @@ -21,8 +21,10 @@ type BcryptPasswordHasher struct { Cost int } -var _ PasswordHasher = DummyPasswordHasher{} -var _ PasswordHasher = BcryptPasswordHasher{0} +var ( + _ PasswordHasher = DummyPasswordHasher{} + _ PasswordHasher = BcryptPasswordHasher{0} +) // PreferredHashers holds the list of preferred hashing algorithms, in order of most to least preferred. 
Any password that does not validate with the primary algorithm will be considered "stale." DO NOT ADD THE DUMMY HASHER FOR USE IN PRODUCTION. var preferredHashers = []PasswordHasher{ diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/regex/regex.go b/vendor/github.com/argoproj/argo-cd/v2/util/regex/regex.go new file mode 100644 index 0000000000..9ff73b8497 --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/util/regex/regex.go @@ -0,0 +1,20 @@ +package regex + +import ( + "github.com/dlclark/regexp2" + log "github.com/sirupsen/logrus" +) + +func Match(pattern, text string) bool { + compiledRegex, err := regexp2.Compile(pattern, 0) + if err != nil { + log.Warnf("failed to compile pattern %s due to error %v", pattern, err) + return false + } + regexMatch, err := compiledRegex.MatchString(text) + if err != nil { + log.Warnf("failed to match pattern %s due to error %v", pattern, err) + return false + } + return regexMatch +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/security/application_namespaces.go b/vendor/github.com/argoproj/argo-cd/v2/util/security/application_namespaces.go index 2ef5edea33..89019beb6f 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/security/application_namespaces.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/security/application_namespaces.go @@ -7,7 +7,7 @@ import ( ) func IsNamespaceEnabled(namespace string, serverNamespace string, enabledNamespaces []string) bool { - return namespace == serverNamespace || glob.MatchStringInList(enabledNamespaces, namespace, false) + return namespace == serverNamespace || glob.MatchStringInList(enabledNamespaces, namespace, glob.REGEXP) } func NamespaceNotPermittedError(namespace string) error { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/security/jwt.go b/vendor/github.com/argoproj/argo-cd/v2/util/security/jwt.go index 1f122ae036..c6645a4eab 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/security/jwt.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/security/jwt.go @@ -20,7 +20,7 @@ func parseJWT(p string) ([]byte, error) { } payload, err := base64.RawURLEncoding.DecodeString(parts[1]) if err != nil { - return nil, fmt.Errorf("malformed jwt payload: %v", err) + return nil, fmt.Errorf("malformed jwt payload: %w", err) } return payload, nil } @@ -58,11 +58,11 @@ type jwtWithOnlyAudClaim struct { func getUnverifiedAudClaim(rawIDToken string) ([]string, error) { payload, err := parseJWT(rawIDToken) if err != nil { - return nil, fmt.Errorf("malformed jwt: %v", err) + return nil, fmt.Errorf("malformed jwt: %w", err) } var token jwtWithOnlyAudClaim if err = json.Unmarshal(payload, &token); err != nil { - return nil, fmt.Errorf("failed to unmarshal claims: %v", err) + return nil, fmt.Errorf("failed to unmarshal claims: %w", err) } return token.Aud, nil } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/settings/resources_filter.go b/vendor/github.com/argoproj/argo-cd/v2/util/settings/resources_filter.go index 7e656eabba..d6a08c5a23 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/settings/resources_filter.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/settings/resources_filter.go @@ -22,7 +22,6 @@ func (rf *ResourcesFilter) getExcludedResources() []FilteredResource { } func (rf 
*ResourcesFilter) checkResourcePresence(apiGroup, kind, cluster string, filteredResources []FilteredResource) bool { - for _, includedResource := range filteredResources { if includedResource.Match(apiGroup, kind, cluster) { return true diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/settings/settings.go b/vendor/github.com/argoproj/argo-cd/v2/util/settings/settings.go index ad4238eb12..14023175cc 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/settings/settings.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/settings/settings.go @@ -7,6 +7,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/base64" + "errors" "fmt" "math/big" "net/url" @@ -30,6 +31,9 @@ import ( "k8s.io/client-go/tools/cache" "sigs.k8s.io/yaml" + enginecache "github.com/argoproj/gitops-engine/pkg/cache" + timeutil "github.com/argoproj/pkg/time" + "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" "github.com/argoproj/argo-cd/v2/server/settings/oidc" @@ -38,8 +42,6 @@ import ( "github.com/argoproj/argo-cd/v2/util/kube" "github.com/argoproj/argo-cd/v2/util/password" tlsutil "github.com/argoproj/argo-cd/v2/util/tls" - enginecache "github.com/argoproj/gitops-engine/pkg/cache" - timeutil "github.com/argoproj/pkg/time" ) // ArgoCDSettings holds in-memory runtime configuration options. @@ -102,6 +104,8 @@ type ArgoCDSettings struct { InClusterEnabled bool `json:"inClusterEnabled"` // ServerRBACLogEnforceEnable temporary var indicates whether rbac will be enforced on logs ServerRBACLogEnforceEnable bool `json:"serverRBACLogEnforceEnable"` + // MaxPodLogsToRender the maximum number of pod logs to render + MaxPodLogsToRender int64 `json:"maxPodLogsToRender"` // ExecEnabled indicates whether the UI exec feature is enabled ExecEnabled bool `json:"execEnabled"` // ExecShells restricts which shells are allowed for `exec` and in which order they are tried @@ -155,28 +159,38 @@ func (o *oidcConfig) toExported() *OIDCConfig { return nil } return &OIDCConfig{ - Name: o.Name, - Issuer: o.Issuer, - ClientID: o.ClientID, - ClientSecret: o.ClientSecret, - CLIClientID: o.CLIClientID, - RequestedScopes: o.RequestedScopes, - RequestedIDTokenClaims: o.RequestedIDTokenClaims, - LogoutURL: o.LogoutURL, - RootCA: o.RootCA, + Name: o.Name, + Issuer: o.Issuer, + ClientID: o.ClientID, + ClientSecret: o.ClientSecret, + CLIClientID: o.CLIClientID, + UserInfoPath: o.UserInfoPath, + EnableUserInfoGroups: o.EnableUserInfoGroups, + UserInfoCacheExpiration: o.UserInfoCacheExpiration, + RequestedScopes: o.RequestedScopes, + RequestedIDTokenClaims: o.RequestedIDTokenClaims, + LogoutURL: o.LogoutURL, + RootCA: o.RootCA, + EnablePKCEAuthentication: o.EnablePKCEAuthentication, + DomainHint: o.DomainHint, } } type OIDCConfig struct { - Name string `json:"name,omitempty"` - Issuer string `json:"issuer,omitempty"` - ClientID string `json:"clientID,omitempty"` - ClientSecret string `json:"clientSecret,omitempty"` - CLIClientID string `json:"cliClientID,omitempty"` - RequestedScopes []string `json:"requestedScopes,omitempty"` - RequestedIDTokenClaims map[string]*oidc.Claim `json:"requestedIDTokenClaims,omitempty"` - LogoutURL string `json:"logoutURL,omitempty"` - RootCA string `json:"rootCA,omitempty"` + Name string `json:"name,omitempty"` + Issuer string 
`json:"issuer,omitempty"` + ClientID string `json:"clientID,omitempty"` + ClientSecret string `json:"clientSecret,omitempty"` + CLIClientID string `json:"cliClientID,omitempty"` + EnableUserInfoGroups bool `json:"enableUserInfoGroups,omitempty"` + UserInfoPath string `json:"userInfoPath,omitempty"` + UserInfoCacheExpiration string `json:"userInfoCacheExpiration,omitempty"` + RequestedScopes []string `json:"requestedScopes,omitempty"` + RequestedIDTokenClaims map[string]*oidc.Claim `json:"requestedIDTokenClaims,omitempty"` + LogoutURL string `json:"logoutURL,omitempty"` + RootCA string `json:"rootCA,omitempty"` + EnablePKCEAuthentication bool `json:"enablePKCEAuthentication,omitempty"` + DomainHint string `json:"domainHint,omitempty"` } // DEPRECATED. Helm repository credentials are now managed using RepoCredentials @@ -426,6 +440,8 @@ const ( settingsApplicationInstanceLabelKey = "application.instanceLabelKey" // settingsResourceTrackingMethodKey is the key to configure tracking method for application resources settingsResourceTrackingMethodKey = "application.resourceTrackingMethod" + // settingsInstallationID holds the key for the instance installation ID + settingsInstallationID = "installationID" // resourcesCustomizationsKey is the key to the map of resource overrides resourceCustomizationsKey = "resource.customizations" // resourceExclusions is the key to the list of excluded resources @@ -436,6 +452,10 @@ const ( resourceIgnoreResourceUpdatesEnabledKey = "resource.ignoreResourceUpdatesEnabled" // resourceCustomLabelKey is the key to a custom label to show in node info, if present resourceCustomLabelsKey = "resource.customLabels" + // resourceIncludeEventLabelKeys is the key to labels to be added onto Application k8s events if present on an Application or its AppProject. Supports wildcard. + // resourceExcludeEventLabelKeys is the key to labels to be excluded from adding onto Application's k8s events. Supports wildcard.
+ resourceExcludeEventLabelKeys = "resource.excludeEventLabelKeys" // kustomizeBuildOptionsKey is a string of kustomize build parameters kustomizeBuildOptionsKey = "kustomize.buildOptions" // kustomizeVersionKeyPrefix is a kustomize version key prefix @@ -478,6 +498,8 @@ const ( inClusterEnabledKey = "cluster.inClusterEnabled" // settingsServerRBACLogEnforceEnable is the key to configure whether logs RBAC enforcement is enabled settingsServerRBACLogEnforceEnableKey = "server.rbac.log.enforce.enable" + // MaxPodLogsToRender the maximum number of pod logs to render + settingsMaxPodLogsToRender = "server.maxPodLogsToRender" // helmValuesFileSchemesKey is the key to configure the list of supported helm values file schemas helmValuesFileSchemesKey = "helm.valuesFileSchemes" // execEnabledKey is the key to configure whether the UI exec feature is enabled @@ -759,6 +781,14 @@ func (mgr *SettingsManager) GetTrackingMethod() (string, error) { return argoCDCM.Data[settingsResourceTrackingMethodKey], nil } +func (mgr *SettingsManager) GetInstallationID() (string, error) { + argoCDCM, err := mgr.getConfigMap() + if err != nil { + return "", err + } + return argoCDCM.Data[settingsInstallationID], nil +} + func (mgr *SettingsManager) GetPasswordPattern() (string, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { @@ -784,6 +814,19 @@ func (mgr *SettingsManager) GetServerRBACLogEnforceEnable() (bool, error) { return strconv.ParseBool(argoCDCM.Data[settingsServerRBACLogEnforceEnableKey]) } +func (mgr *SettingsManager) GetMaxPodLogsToRender() (int64, error) { + argoCDCM, err := mgr.getConfigMap() + if err != nil { + return 10, err + } + + if argoCDCM.Data[settingsMaxPodLogsToRender] == "" { + return 10, nil + } + + return strconv.ParseInt(argoCDCM.Data[settingsMaxPodLogsToRender], 10, 64) +} + func (mgr *SettingsManager) GetDeepLinks(deeplinkType string) ([]DeepLink, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { @@ -1045,7 +1088,7 @@ func (mgr *SettingsManager) GetResourceCompareOptions() (ArgoCDDiffOptions, erro func (mgr *SettingsManager) GetHelmSettings() (*v1alpha1.HelmOptions, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, fmt.Errorf("failed to get argo-cd config map: %v", err) + return nil, fmt.Errorf("failed to get argo-cd config map: %w", err) } helmOptions := &v1alpha1.HelmOptions{} if value, ok := argoCDCM.Data[helmValuesFileSchemesKey]; ok { @@ -1138,7 +1181,6 @@ func (mgr *SettingsManager) GetHelmRepositories() ([]HelmRepoCredentials, error) } func (mgr *SettingsManager) GetRepositories() ([]Repository, error) { - mgr.mutex.Lock() reposCache := mgr.reposCache mgr.mutex.Unlock() @@ -1198,7 +1240,6 @@ func (mgr *SettingsManager) SaveRepositoryCredentials(creds []RepositoryCredenti } func (mgr *SettingsManager) GetRepositoryCredentials() ([]RepositoryCredentials, error) { - mgr.mutex.Lock() repoCredsCache := mgr.repoCredsCache mgr.mutex.Unlock() @@ -1329,8 +1370,15 @@ func (mgr *SettingsManager) initialize(ctx context.Context) error { } cmInformer := v1.NewFilteredConfigMapInformer(mgr.clientset, mgr.namespace, 3*time.Minute, indexers, tweakConfigMap) secretsInformer := v1.NewSecretInformer(mgr.clientset, mgr.namespace, 3*time.Minute, indexers) - cmInformer.AddEventHandler(eventHandler) - secretsInformer.AddEventHandler(eventHandler) + _, err := cmInformer.AddEventHandler(eventHandler) + if err != nil { + log.Error(err) + } + + _, err = secretsInformer.AddEventHandler(eventHandler) + if err != nil { + log.Error(err) + } log.Info("Starting 
configmap/secret informers") go func() { @@ -1363,7 +1411,6 @@ func (mgr *SettingsManager) initialize(ctx context.Context) error { tryNotify() } } - }, UpdateFunc: func(oldObj, newObj interface{}) { oldMeta, oldOk := oldObj.(metav1.Common) @@ -1373,8 +1420,14 @@ func (mgr *SettingsManager) initialize(ctx context.Context) error { } }, } - secretsInformer.AddEventHandler(handler) - cmInformer.AddEventHandler(handler) + _, err = secretsInformer.AddEventHandler(handler) + if err != nil { + log.Error(err) + } + _, err = cmInformer.AddEventHandler(handler) + if err != nil { + log.Error(err) + } mgr.secrets = v1listers.NewSecretLister(secretsInformer.GetIndexer()) mgr.secretsInformer = secretsInformer mgr.configmaps = v1listers.NewConfigMapLister(cmInformer.GetIndexer()) @@ -1440,6 +1493,13 @@ func updateSettingsFromConfigMap(settings *ArgoCDSettings, argoCDCM *apiv1.Confi if settings.PasswordPattern == "" { settings.PasswordPattern = common.PasswordPatten } + if maxPodLogsToRenderStr, ok := argoCDCM.Data[settingsMaxPodLogsToRender]; ok { + if val, err := strconv.ParseInt(maxPodLogsToRenderStr, 10, 64); err != nil { + log.Warnf("Failed to parse '%s' key: %v", settingsMaxPodLogsToRender, err) + } else { + settings.MaxPodLogsToRender = val + } + } settings.InClusterEnabled = argoCDCM.Data[inClusterEnabledKey] != "false" settings.ExecEnabled = argoCDCM.Data[execEnabledKey] == "true" execShells := argoCDCM.Data[execShellsKey] @@ -1461,7 +1521,7 @@ func validateExternalURL(u string) error { } URL, err := url.Parse(u) if err != nil { - return fmt.Errorf("Failed to parse URL: %v", err) + return fmt.Errorf("Failed to parse URL: %w", err) } if URL.Scheme != "http" && URL.Scheme != "https" { return fmt.Errorf("URL must include http or https protocol") @@ -1478,27 +1538,6 @@ func (mgr *SettingsManager) updateSettingsFromSecret(settings *ArgoCDSettings, a } else { errs = append(errs, &incompleteSettingsError{message: "server.secretkey is missing"}) } - if githubWebhookSecret := argoCDSecret.Data[settingsWebhookGitHubSecretKey]; len(githubWebhookSecret) > 0 { - settings.WebhookGitHubSecret = string(githubWebhookSecret) - } - if gitlabWebhookSecret := argoCDSecret.Data[settingsWebhookGitLabSecretKey]; len(gitlabWebhookSecret) > 0 { - settings.WebhookGitLabSecret = string(gitlabWebhookSecret) - } - if bitbucketWebhookUUID := argoCDSecret.Data[settingsWebhookBitbucketUUIDKey]; len(bitbucketWebhookUUID) > 0 { - settings.WebhookBitbucketUUID = string(bitbucketWebhookUUID) - } - if bitbucketserverWebhookSecret := argoCDSecret.Data[settingsWebhookBitbucketServerSecretKey]; len(bitbucketserverWebhookSecret) > 0 { - settings.WebhookBitbucketServerSecret = string(bitbucketserverWebhookSecret) - } - if gogsWebhookSecret := argoCDSecret.Data[settingsWebhookGogsSecretKey]; len(gogsWebhookSecret) > 0 { - settings.WebhookGogsSecret = string(gogsWebhookSecret) - } - if azureDevOpsUsername := argoCDSecret.Data[settingsWebhookAzureDevOpsUsernameKey]; len(azureDevOpsUsername) > 0 { - settings.WebhookAzureDevOpsUsername = string(azureDevOpsUsername) - } - if azureDevOpsPassword := argoCDSecret.Data[settingsWebhookAzureDevOpsPasswordKey]; len(azureDevOpsPassword) > 0 { - settings.WebhookAzureDevOpsPassword = string(azureDevOpsPassword) - } // The TLS certificate may be externally managed. We try to load it from an // external secret first. 
If the external secret doesn't exist, we either @@ -1538,6 +1577,15 @@ func (mgr *SettingsManager) updateSettingsFromSecret(settings *ArgoCDSettings, a if len(errs) > 0 { return errs[0] } + + settings.WebhookGitHubSecret = ReplaceStringSecret(string(argoCDSecret.Data[settingsWebhookGitHubSecretKey]), settings.Secrets) + settings.WebhookGitLabSecret = ReplaceStringSecret(string(argoCDSecret.Data[settingsWebhookGitLabSecretKey]), settings.Secrets) + settings.WebhookBitbucketUUID = ReplaceStringSecret(string(argoCDSecret.Data[settingsWebhookBitbucketUUIDKey]), settings.Secrets) + settings.WebhookBitbucketServerSecret = ReplaceStringSecret(string(argoCDSecret.Data[settingsWebhookBitbucketServerSecretKey]), settings.Secrets) + settings.WebhookGogsSecret = ReplaceStringSecret(string(argoCDSecret.Data[settingsWebhookGogsSecretKey]), settings.Secrets) + settings.WebhookAzureDevOpsUsername = ReplaceStringSecret(string(argoCDSecret.Data[settingsWebhookAzureDevOpsUsernameKey]), settings.Secrets) + settings.WebhookAzureDevOpsPassword = ReplaceStringSecret(string(argoCDSecret.Data[settingsWebhookAzureDevOpsPasswordKey]), settings.Secrets) + return nil } @@ -1596,7 +1644,6 @@ func (mgr *SettingsManager) SaveSettings(settings *ArgoCDSettings) error { } return nil }) - if err != nil { return err } @@ -1702,7 +1749,6 @@ func (mgr *SettingsManager) SaveGPGPublicKeyData(ctx context.Context, gpgPublicK } return mgr.ResyncInformers() - } type SettingsManagerOpts func(mgs *SettingsManager) @@ -1715,7 +1761,6 @@ func WithRepoOrClusterChangedHandler(handler func()) SettingsManagerOpts { // NewSettingsManager generates a new SettingsManager pointer and returns it func NewSettingsManager(ctx context.Context, clientset kubernetes.Interface, namespace string, opts ...SettingsManagerOpts) *SettingsManager { - mgr := &SettingsManager{ ctx: ctx, clientset: clientset, @@ -1839,6 +1884,34 @@ func (a *ArgoCDSettings) IssuerURL() string { return "" } +// UserInfoGroupsEnabled returns whether group claims should be fetch from UserInfo endpoint +func (a *ArgoCDSettings) UserInfoGroupsEnabled() bool { + if oidcConfig := a.OIDCConfig(); oidcConfig != nil { + return oidcConfig.EnableUserInfoGroups + } + return false +} + +// UserInfoPath returns the sub-path on which the IDP exposes the UserInfo endpoint +func (a *ArgoCDSettings) UserInfoPath() string { + if oidcConfig := a.OIDCConfig(); oidcConfig != nil { + return oidcConfig.UserInfoPath + } + return "" +} + +// UserInfoCacheExpiration returns the expiry time of the UserInfo cache +func (a *ArgoCDSettings) UserInfoCacheExpiration() time.Duration { + if oidcConfig := a.OIDCConfig(); oidcConfig != nil && oidcConfig.UserInfoCacheExpiration != "" { + userInfoCacheExpiration, err := time.ParseDuration(oidcConfig.UserInfoCacheExpiration) + if err != nil { + log.Warnf("Failed to parse 'oidc.config.userInfoCacheExpiration' key: %v", err) + } + return userInfoCacheExpiration + } + return 0 +} + func (a *ArgoCDSettings) OAuth2ClientID() string { if oidcConfig := a.OIDCConfig(); oidcConfig != nil { return oidcConfig.ClientID @@ -1986,8 +2059,8 @@ func (mgr *SettingsManager) notifySubscribers(newSettings *ArgoCDSettings) { } func isIncompleteSettingsError(err error) bool { - _, ok := err.(*incompleteSettingsError) - return ok + var incompleteSettingsErr *incompleteSettingsError + return errors.As(err, &incompleteSettingsErr) } // InitializeSettings is used to initialize empty admin password, signature, certificate etc if missing @@ -2156,7 +2229,7 @@ func (mgr *SettingsManager) 
GetNamespace() string { func (mgr *SettingsManager) GetResourceCustomLabels() ([]string, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return []string{}, fmt.Errorf("failed getting configmap: %v", err) + return []string{}, fmt.Errorf("failed getting configmap: %w", err) } labels := argoCDCM.Data[resourceCustomLabelsKey] if labels != "" { @@ -2165,6 +2238,38 @@ func (mgr *SettingsManager) GetResourceCustomLabels() ([]string, error) { return []string{}, nil } +func (mgr *SettingsManager) GetIncludeEventLabelKeys() []string { + labelKeys := []string{} + argoCDCM, err := mgr.getConfigMap() + if err != nil { + log.Error(fmt.Errorf("failed getting configmap: %w", err)) + return labelKeys + } + if value, ok := argoCDCM.Data[resourceIncludeEventLabelKeys]; ok { + if value != "" { + value = strings.ReplaceAll(value, " ", "") + labelKeys = strings.Split(value, ",") + } + } + return labelKeys +} + +func (mgr *SettingsManager) GetExcludeEventLabelKeys() []string { + labelKeys := []string{} + argoCDCM, err := mgr.getConfigMap() + if err != nil { + log.Error(fmt.Errorf("failed getting configmap: %w", err)) + return labelKeys + } + if value, ok := argoCDCM.Data[resourceExcludeEventLabelKeys]; ok { + if value != "" { + value = strings.ReplaceAll(value, " ", "") + labelKeys = strings.Split(value, ",") + } + } + return labelKeys +} + func (mgr *SettingsManager) GetMaxWebhookPayloadSize() int64 { argoCDCM, err := mgr.getConfigMap() if err != nil { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/tls/tls.go b/vendor/github.com/argoproj/argo-cd/v2/util/tls/tls.go index 5e18c8eb75..819a8761af 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/tls/tls.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/tls/tls.go @@ -28,21 +28,19 @@ const ( DefaultRSABits = 2048 // The default TLS cipher suites to provide to clients - see https://cipherlist.eu for updates // Note that for TLS v1.3, cipher suites are not configurable and will be chosen automatically. 
- DefaultTLSCipherSuite = "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_GCM_SHA384" + DefaultTLSCipherSuite = "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" // The default minimum TLS version to provide to clients DefaultTLSMinVersion = "1.2" // The default maximum TLS version to provide to clients DefaultTLSMaxVersion = "1.3" ) -var ( - tlsVersionByString = map[string]uint16{ - "1.0": tls.VersionTLS10, - "1.1": tls.VersionTLS11, - "1.2": tls.VersionTLS12, - "1.3": tls.VersionTLS13, - } -) +var tlsVersionByString = map[string]uint16{ + "1.0": tls.VersionTLS10, + "1.1": tls.VersionTLS11, + "1.2": tls.VersionTLS12, + "1.3": tls.VersionTLS13, +} type CertOptions struct { // Hostnames and IPs to generate a certificate for @@ -164,7 +162,6 @@ func getTLSConfigCustomizer(minVersionStr, maxVersionStr, tlsCiphersStr string) config.MaxVersion = maxVersion config.CipherSuites = cipherSuites }, nil - } // Adds TLS server related command line options to a command and returns a TLS @@ -235,7 +232,7 @@ func generate(opts CertOptions) ([]byte, crypto.PrivateKey, error) { return nil, nil, fmt.Errorf("Unrecognized elliptic curve: %q", opts.ECDSACurve) } if err != nil { - return nil, nil, fmt.Errorf("failed to generate private key: %s", err) + return nil, nil, fmt.Errorf("failed to generate private key: %w", err) } var notBefore time.Time @@ -255,7 +252,7 @@ func generate(opts CertOptions) ([]byte, crypto.PrivateKey, error) { serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { - return nil, nil, fmt.Errorf("failed to generate serial number: %s", err) + return nil, nil, fmt.Errorf("failed to generate serial number: %w", err) } if opts.Organization == "" { @@ -289,7 +286,7 @@ func generate(opts CertOptions) ([]byte, crypto.PrivateKey, error) { certBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(privateKey), privateKey) if err != nil { - return nil, nil, fmt.Errorf("Failed to create certificate: %s", err) + return nil, nil, fmt.Errorf("Failed to create certificate: %w", err) } return certBytes, privateKey, nil } @@ -320,7 +317,6 @@ func GenerateX509KeyPair(opts CertOptions) (*tls.Certificate, error) { // EncodeX509KeyPair encodes a TLS Certificate into its pem encoded format for storage func EncodeX509KeyPair(cert tls.Certificate) ([]byte, []byte) { - certpem := []byte{} for _, certtmp := range cert.Certificate { certpem = append(certpem, pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certtmp})...) 
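// Hedged illustration (editor's sketch, not part of the patch): the defaults and
// the tlsVersionByString map above typically combine into a *tls.Config like so:
//
//	cfg := &tls.Config{
//		MinVersion: tlsVersionByString[DefaultTLSMinVersion], // tls.VersionTLS12
//		MaxVersion: tlsVersionByString[DefaultTLSMaxVersion], // tls.VersionTLS13
//	}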
@@ -348,11 +344,11 @@ func LoadX509CertPool(paths ...string) (*x509.CertPool, error) { continue } // ...but everything else is considered an error - return nil, fmt.Errorf("could not load TLS certificate: %v", err) + return nil, fmt.Errorf("could not load TLS certificate: %w", err) } else { f, err := os.ReadFile(path) if err != nil { - return nil, fmt.Errorf("failure to load TLS certificates from %s: %v", path, err) + return nil, fmt.Errorf("failure to load TLS certificates from %s: %w", path, err) } if ok := pool.AppendCertsFromPEM(f); !ok { return nil, fmt.Errorf("invalid cert data in %s", path) @@ -366,7 +362,7 @@ func LoadX509CertPool(paths ...string) (*x509.CertPool, error) { func LoadX509Cert(path string) (*x509.Certificate, error) { bytes, err := os.ReadFile(path) if err != nil { - return nil, fmt.Errorf("could not read certificate file: %v", err) + return nil, fmt.Errorf("could not read certificate file: %w", err) } block, _ := pem.Decode(bytes) if block == nil { @@ -374,7 +370,7 @@ func LoadX509Cert(path string) (*x509.Certificate, error) { } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { - return nil, fmt.Errorf("could not parse certificate: %v", err) + return nil, fmt.Errorf("could not parse certificate: %w", err) } return cert, nil } @@ -427,11 +423,10 @@ func CreateServerTLSConfig(tlsCertPath, tlsKeyPath string, hosts []string) (*tls log.Infof("Loading TLS configuration from cert=%s and key=%s", tlsCertPath, tlsKeyPath) c, err := tls.LoadX509KeyPair(tlsCertPath, tlsKeyPath) if err != nil { - return nil, fmt.Errorf("Unable to initalize TLS configuration with cert=%s and key=%s: %v", tlsCertPath, tlsKeyPath, err) + return nil, fmt.Errorf("Unable to initialize TLS configuration with cert=%s and key=%s: %w", tlsCertPath, tlsKeyPath, err) } cert = &c } return &tls.Config{Certificates: []tls.Certificate{*cert}}, nil - } diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/cache/cluster.go b/vendor/github.com/argoproj/gitops-engine/pkg/cache/cluster.go index 3f662effcb..96f9ebe72b 100644 --- a/vendor/github.com/argoproj/gitops-engine/pkg/cache/cluster.go +++ b/vendor/github.com/argoproj/gitops-engine/pkg/cache/cluster.go @@ -69,9 +69,7 @@ const ( ) type apiMeta struct { - namespaced bool - // watchCancel stops the watch of all resources for this API. This gets called when the cache is invalidated or when - // the watched API ceases to exist (e.g. a CRD gets deleted). + namespaced bool watchCancel context.CancelFunc } @@ -122,9 +120,6 @@ type ClusterCache interface { // IterateHierarchy iterates resource tree starting from the specified top level resource and executes callback for each resource in the tree. // The action callback returns true if iteration should continue and false otherwise. IterateHierarchy(key kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) - // IterateHierarchyV2 iterates resource tree starting from the specified top level resources and executes callback for each resource in the tree. - // The action callback returns true if iteration should continue and false otherwise. 
- IterateHierarchyV2(keys []kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) // IsNamespaced answers if specified group/kind is a namespaced resource API or not IsNamespaced(gk schema.GroupKind) (bool, error) // GetManagedLiveObjs helps finding matching live K8S resources for a given resources list. @@ -473,7 +468,7 @@ func (c *clusterCache) stopWatching(gk schema.GroupKind, ns string) { } } -// startMissingWatches lists supported cluster resources and starts watching for changes unless watch is already running +// startMissingWatches lists supported cluster resources and start watching for changes unless watch is already running func (c *clusterCache) startMissingWatches() error { apis, err := c.kubectl.GetAPIResources(c.config, true, c.settings.ResourcesFilter) if err != nil { @@ -575,7 +570,6 @@ func (c *clusterCache) listResources(ctx context.Context, resClient dynamic.Reso return resourceVersion, callback(listPager) } -// loadInitialState loads the state of all the resources retrieved by the given resource client. func (c *clusterCache) loadInitialState(ctx context.Context, api kube.APIResourceInfo, resClient dynamic.ResourceInterface, ns string, lock bool) (string, error) { var items []*Resource resourceVersion, err := c.listResources(ctx, resClient, func(listPager *pager.ListPager) error { @@ -734,9 +728,6 @@ func (c *clusterCache) watchEvents(ctx context.Context, api kube.APIResourceInfo }) } -// processApi processes all the resources for a given API. First we construct an API client for the given API. Then we -// call the callback. If we're managing the whole cluster, we call the callback with the client and an empty namespace. -// If we're managing specific namespaces, we call the callback for each namespace. func (c *clusterCache) processApi(client dynamic.Interface, api kube.APIResourceInfo, callback func(resClient dynamic.ResourceInterface, ns string) error) error { resClient := client.Resource(api.GroupVersionResource) switch { @@ -806,17 +797,6 @@ func (c *clusterCache) checkPermission(ctx context.Context, reviewInterface auth return true, nil } -// sync retrieves the current state of the cluster and stores relevant information in the clusterCache fields. -// -// First we get some metadata from the cluster, like the server version, OpenAPI document, and the list of all API -// resources. -// -// Then we get a list of the preferred versions of all API resources which are to be monitored (it's possible to exclude -// resources from monitoring). We loop through those APIs asynchronously and for each API we list all resources. We also -// kick off a goroutine to watch the resources for that API and update the cache constantly. -// -// When this function exits, the cluster cache is up to date, and the appropriate resources are being watched for -// changes. func (c *clusterCache) sync() error { c.log.Info("Start syncing cluster") @@ -863,8 +843,6 @@ func (c *clusterCache) sync() error { if err != nil { return err } - - // Each API is processed in parallel, so we need to take out a lock when we update clusterCache fields. 
lock := sync.Mutex{} err = kube.RunAllAsync(len(apis), func(i int) error { api := apis[i] @@ -1026,107 +1004,6 @@ func (c *clusterCache) IterateHierarchy(key kube.ResourceKey, action func(resour } } -// IterateHierarchy iterates resource tree starting from the specified top level resources and executes callback for each resource in the tree -func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) { - c.lock.RLock() - defer c.lock.RUnlock() - keysPerNamespace := make(map[string][]kube.ResourceKey) - for _, key := range keys { - _, ok := c.resources[key] - if !ok { - continue - } - keysPerNamespace[key.Namespace] = append(keysPerNamespace[key.Namespace], key) - } - for namespace, namespaceKeys := range keysPerNamespace { - nsNodes := c.nsIndex[namespace] - graph := buildGraph(nsNodes) - visited := make(map[kube.ResourceKey]int) - for _, key := range namespaceKeys { - visited[key] = 0 - } - for _, key := range namespaceKeys { - // The check for existence of key is done above. - res := c.resources[key] - if visited[key] == 2 || !action(res, nsNodes) { - continue - } - visited[key] = 1 - if _, ok := graph[key]; ok { - for _, child := range graph[key] { - if visited[child.ResourceKey()] == 0 && action(child, nsNodes) { - child.iterateChildrenV2(graph, nsNodes, visited, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool { - if err != nil { - c.log.V(2).Info(err.Error()) - return false - } - return action(child, namespaceResources) - }) - } - } - } - visited[key] = 2 - } - } -} - -func buildGraph(nsNodes map[kube.ResourceKey]*Resource) map[kube.ResourceKey]map[types.UID]*Resource { - // Prepare to construct a graph - nodesByUID := make(map[types.UID][]*Resource, len(nsNodes)) - for _, node := range nsNodes { - nodesByUID[node.Ref.UID] = append(nodesByUID[node.Ref.UID], node) - } - - // In graph, they key is the parent and the value is a list of children. - graph := make(map[kube.ResourceKey]map[types.UID]*Resource) - - // Loop through all nodes, calling each one "childNode," because we're only bothering with it if it has a parent. - for _, childNode := range nsNodes { - for i, ownerRef := range childNode.OwnerRefs { - // First, backfill UID of inferred owner child references. - if ownerRef.UID == "" { - group, err := schema.ParseGroupVersion(ownerRef.APIVersion) - if err != nil { - // APIVersion is invalid, so we couldn't find the parent. - continue - } - graphKeyNode, ok := nsNodes[kube.ResourceKey{Group: group.Group, Kind: ownerRef.Kind, Namespace: childNode.Ref.Namespace, Name: ownerRef.Name}] - if ok { - ownerRef.UID = graphKeyNode.Ref.UID - childNode.OwnerRefs[i] = ownerRef - } else { - // No resource found with the given graph key, so move on. - continue - } - } - - // Now that we have the UID of the parent, update the graph. - uidNodes, ok := nodesByUID[ownerRef.UID] - if ok { - for _, uidNode := range uidNodes { - // Update the graph for this owner to include the child. - if _, ok := graph[uidNode.ResourceKey()]; !ok { - graph[uidNode.ResourceKey()] = make(map[types.UID]*Resource) - } - r, ok := graph[uidNode.ResourceKey()][childNode.Ref.UID] - if !ok { - graph[uidNode.ResourceKey()][childNode.Ref.UID] = childNode - } else if r != nil { - // The object might have multiple children with the same UID (e.g. replicaset from apps and extensions group). - // It is ok to pick any object, but we need to make sure we pick the same child after every refresh. 
- key1 := r.ResourceKey() - key2 := childNode.ResourceKey() - if strings.Compare(key1.String(), key2.String()) > 0 { - graph[uidNode.ResourceKey()][childNode.Ref.UID] = childNode - } - } - } - } - } - } - return graph -} - // IsNamespaced answers if specified group/kind is a namespaced resource API or not func (c *clusterCache) IsNamespaced(gk schema.GroupKind) (bool, error) { if isNamespaced, ok := c.namespacedResources[gk]; ok { diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/cache/resource.go b/vendor/github.com/argoproj/gitops-engine/pkg/cache/resource.go index eae3d4e6eb..4097f4dcaf 100644 --- a/vendor/github.com/argoproj/gitops-engine/pkg/cache/resource.go +++ b/vendor/github.com/argoproj/gitops-engine/pkg/cache/resource.go @@ -2,7 +2,6 @@ package cache import ( "fmt" - "k8s.io/apimachinery/pkg/types" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -100,33 +99,3 @@ func (r *Resource) iterateChildren(ns map[kube.ResourceKey]*Resource, parents ma } } } - -// iterateChildrenV2 is a depth-first traversal of the graph of resources starting from the current resource. -func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*Resource, ns map[kube.ResourceKey]*Resource, visited map[kube.ResourceKey]int, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) { - key := r.ResourceKey() - if visited[key] == 2 { - return - } - // this indicates that we've started processing this node's children - visited[key] = 1 - defer func() { - // this indicates that we've finished processing this node's children - visited[key] = 2 - }() - children, ok := graph[key] - if !ok || children == nil { - return - } - for _, c := range children { - childKey := c.ResourceKey() - child := ns[childKey] - if visited[childKey] == 1 { - // Since we encountered a node that we're currently processing, we know we have a circular dependency. - _ = action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns) - } else if visited[childKey] == 0 { - if action(nil, child, ns) { - child.iterateChildrenV2(graph, ns, visited, action) - } - } - } -} diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/diff/diff.go b/vendor/github.com/argoproj/gitops-engine/pkg/diff/diff.go index 2278222c30..48b15ca956 100644 --- a/vendor/github.com/argoproj/gitops-engine/pkg/diff/diff.go +++ b/vendor/github.com/argoproj/gitops-engine/pkg/diff/diff.go @@ -7,6 +7,7 @@ package diff import ( "bytes" "context" + "encoding/base64" "encoding/json" "errors" "fmt" @@ -864,6 +865,32 @@ func NormalizeSecret(un *unstructured.Unstructured, opts ...Option) { if gvk.Group != "" || gvk.Kind != "Secret" { return } + + // move stringData to data section + if stringData, found, err := unstructured.NestedMap(un.Object, "stringData"); found && err == nil { + var data map[string]interface{} + data, found, _ = unstructured.NestedMap(un.Object, "data") + if !found { + data = make(map[string]interface{}) + } + + // base64 encode string values and add non-string values as is. + // This ensures that the apply fails if the secret is invalid. 
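+	// Hedged example: stringData {"password": "bar"} becomes data
+	// {"password": "YmFy"} (base64 of "bar"), mirroring how the API
+	// server merges stringData into data on write.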
+ for k, v := range stringData { + strVal, ok := v.(string) + if ok { + data[k] = base64.StdEncoding.EncodeToString([]byte(strVal)) + } else { + data[k] = v + } + } + + err := unstructured.SetNestedField(un.Object, data, "data") + if err == nil { + delete(un.Object, "stringData") + } + } + o := applyOptions(opts) var secret corev1.Secret err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &secret) @@ -877,15 +904,6 @@ func NormalizeSecret(un *unstructured.Unstructured, opts ...Option) { secret.Data[k] = []byte("") } } - if len(secret.StringData) > 0 { - if secret.Data == nil { - secret.Data = make(map[string][]byte) - } - for k, v := range secret.StringData { - secret.Data[k] = []byte(v) - } - delete(un.Object, "stringData") - } newObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&secret) if err != nil { o.log.Error(err, "object unable to convert from secret") diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/kube.go b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/kube.go index f88ed172b5..9db1090622 100644 --- a/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/kube.go +++ b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/kube.go @@ -205,12 +205,15 @@ var ( // See ApplyOpts::Run() // cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err) kubectlApplyPatchErrOutRegexp = regexp.MustCompile(`(?s)^error when applying patch:.*\nfor: "\S+": `) + + kubectlErrOutMapRegexp = regexp.MustCompile(`map\[.*\]`) ) // cleanKubectlOutput makes the error output of kubectl a little better to read func cleanKubectlOutput(s string) string { s = strings.TrimSpace(s) s = kubectlErrOutRegexp.ReplaceAllString(s, "") + s = kubectlErrOutMapRegexp.ReplaceAllString(s, "") s = kubectlApplyPatchErrOutRegexp.ReplaceAllString(s, "") s = strings.Replace(s, "; if you choose to ignore these errors, turn validation off with --validate=false", "", -1) return s diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 776e31b21d..c483e0cb8e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -442,6 +442,17 @@ func (c *Config) WithUseDualStack(enable bool) *Config { return c } +// WithUseFIPSEndpoint sets a config UseFIPSEndpoint value returning a Config +// pointer for chaining. +func (c *Config) WithUseFIPSEndpoint(enable bool) *Config { + if enable { + c.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + } else { + c.UseFIPSEndpoint = endpoints.FIPSEndpointStateDisabled + } + return c +} + // WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value // returning a Config pointer for chaining. 
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go new file mode 100644 index 0000000000..140242dd1b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go @@ -0,0 +1,4 @@ +// DO NOT EDIT +package corehandlers + +const isAwsInternal = "" \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go index ab69c7a6f3..ac842c55d8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -35,3 +35,13 @@ var AddHostExecEnvUserAgentHander = request.NamedHandler{ request.AddToUserAgent(r, execEnvUAKey+"/"+v) }, } + +var AddAwsInternal = request.NamedHandler{ + Name: "core.AddAwsInternal", + Fn: func(r *request.Request) { + if len(isAwsInternal) == 0 { + return + } + request.AddToUserAgent(r, isAwsInternal) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index 785f30d8e6..329f788a38 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -31,6 +31,8 @@ package endpointcreds import ( "encoding/json" + "fmt" + "strings" "time" "github.com/aws/aws-sdk-go/aws" @@ -69,7 +71,37 @@ type Provider struct { // Optional authorization token value if set will be used as the value of // the Authorization header of the endpoint credential request. + // + // When constructed from environment, the provider will use the value of + // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token + // + // Will be overridden if AuthorizationTokenProvider is configured AuthorizationToken string + + // Optional auth provider func to dynamically load the auth token from a file + // every time a credential is retrieved + // + // When constructed from environment, the provider will read and use the content + // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable + // as the auth token every time credentials are retrieved + // + // Will override AuthorizationToken if configured + AuthorizationTokenProvider AuthTokenProvider +} + +// AuthTokenProvider defines an interface to dynamically load a value to be passed +// for the Authorization header of a credentials request.
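+//
+// A hedged caller sketch (the file path is an assumption): re-reading a token
+// file on every credential fetch via the TokenProviderFunc adapter below:
+//
+//	p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
+//		b, err := os.ReadFile("/var/run/secrets/token")
+//		if err != nil {
+//			return "", err
+//		}
+//		return string(b), nil
+//	})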
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index 776e31b21d..c483e0cb8e 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -442,6 +442,17 @@ func (c *Config) WithUseDualStack(enable bool) *Config {
 	return c
 }
 
+// WithUseFIPSEndpoint sets a config UseFIPSEndpoint value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseFIPSEndpoint(enable bool) *Config {
+	if enable {
+		c.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
+	} else {
+		c.UseFIPSEndpoint = endpoints.FIPSEndpointStateDisabled
+	}
+	return c
+}
+
 // WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
 // returning a Config pointer for chaining.
 func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
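WithUseFIPSEndpoint gives Config the same chaining-setter style as WithUseDualStack, mapping a bool onto the endpoints.FIPSEndpointState enum. A minimal usage sketch (the region choice is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Opt the whole session into FIPS endpoints; services that publish a
	// fipsVariant in the endpoint table resolve to it automatically.
	cfg := aws.NewConfig().
		WithRegion("us-east-1").
		WithUseFIPSEndpoint(true)

	sess := session.Must(session.NewSession(cfg))
	fmt.Println(aws.StringValue(sess.Config.Region))
}
```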
"AWS_CONTAINER_CREDENTIALS_FULL_URI" ) +// direct representation of the IPv4 address for the ECS container +// "169.254.170.2" +var ecsContainerIPv4 net.IP = []byte{ + 169, 254, 170, 2, +} + +// direct representation of the IPv4 address for the EKS container +// "169.254.170.23" +var eksContainerIPv4 net.IP = []byte{ + 169, 254, 170, 23, +} + +// direct representation of the IPv6 address for the EKS container +// "fd00:ec2::23" +var eksContainerIPv6 net.IP = []byte{ + 0xFD, 0, 0xE, 0xC2, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0x23, +} + // RemoteCredProvider returns a credentials provider for the default remote // endpoints such as EC2 or ECS Roles. func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { @@ -134,19 +158,22 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P var lookupHostFn = net.LookupHost -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil +// isAllowedHost allows host to be loopback or known ECS/EKS container IPs +// +// host can either be an IP address OR an unresolved hostname - resolution will +// be automatically performed in the latter case +func isAllowedHost(host string) (bool, error) { + if ip := net.ParseIP(host); ip != nil { + return isIPAllowed(ip), nil } - // Host is not an ip, perform lookup addrs, err := lookupHostFn(host) if err != nil { return false, err } + for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { + if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) { return false, nil } } @@ -154,6 +181,13 @@ func isLoopbackHost(host string) (bool, error) { return true, nil } +func isIPAllowed(ip net.IP) bool { + return ip.IsLoopback() || + ip.Equal(ecsContainerIPv4) || + ip.Equal(eksContainerIPv4) || + ip.Equal(eksContainerIPv6) +} + func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { var errMsg string @@ -164,10 +198,12 @@ func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) host := aws.URLHostname(parsed) if len(host) == 0 { errMsg = "unable to parse host from local HTTP cred provider URL" - } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { - errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) - } else if !isLoopback { - errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } else if parsed.Scheme == "http" { + if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, allowHostErr) + } else if !isAllowedHost { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed.", host) + } } } @@ -189,6 +225,15 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" { + p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) { + if contents, err := ioutil.ReadFile(authFilePath); err != nil { + return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err) + } else { + return string(contents), nil + } + }) + } }, ) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
index 604aeffdeb..f1f9ba4ec5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
@@ -2,6 +2,7 @@ package ec2metadata
 
 import (
 	"fmt"
+	"github.com/aws/aws-sdk-go/aws"
 	"net/http"
 	"sync/atomic"
 	"time"
@@ -65,7 +66,9 @@ func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
 		switch requestFailureError.StatusCode() {
 		case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
 			atomic.StoreUint32(&t.disabled, 1)
-			t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError))
+			if t.client.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) {
+				t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError))
+			}
 		case http.StatusBadRequest:
 			r.Error = requestFailureError
 		}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 6027df1e18..6d5013fcac 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -31,6 +31,7 @@ const (
 	ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta).
 	ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne).
 	CaCentral1RegionID   = "ca-central-1"   // Canada (Central).
+	CaWest1RegionID      = "ca-west-1"      // Canada West (Calgary).
 	EuCentral1RegionID   = "eu-central-1"   // Europe (Frankfurt).
 	EuCentral2RegionID   = "eu-central-2"   // Europe (Zurich).
 	EuNorth1RegionID     = "eu-north-1"     // Europe (Stockholm).
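With CaWest1RegionID registered (and the matching service entries added in the hunks below), the shared endpoint resolver can hand back both the regional and FIPS hostnames for ca-west-1. A small sketch using ACM, assuming the standard endpoints.Options FIPS field on this SDK generation:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	r := endpoints.DefaultResolver()

	// Plain regional endpoint for the new Calgary region.
	if ep, err := r.EndpointFor("acm", endpoints.CaWest1RegionID); err == nil {
		fmt.Println(ep.URL) // https://acm.ca-west-1.amazonaws.com
	}

	// FIPS variant, matching the acm-fips.ca-west-1 entry added below.
	fipsEp, err := r.EndpointFor("acm", endpoints.CaWest1RegionID, func(o *endpoints.Options) {
		o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
	})
	if err == nil {
		fmt.Println(fipsEp.URL) // https://acm-fips.ca-west-1.amazonaws.com
	}
}
```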
@@ -190,6 +191,9 @@ var awsPartition = partition{
 		"ca-central-1": region{
 			Description: "Canada (Central)",
 		},
+		"ca-west-1": region{
+			Description: "Canada West (Calgary)",
+		},
 		"eu-central-1": region{
 			Description: "Europe (Frankfurt)",
 		},
@@ -291,6 +295,9 @@ var awsPartition = partition{
 			}: endpoint{
 				Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com",
 			},
+			endpointKey{
+				Region: "ca-west-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},
@@ -477,6 +484,24 @@ var awsPartition = partition{
 				},
 				Deprecated: boxedTrue,
 			},
+			endpointKey{
+				Region: "ca-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region:  "ca-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "acm-fips.ca-west-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "ca-west-1-fips",
+			}: endpoint{
+				Hostname: "acm-fips.ca-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "ca-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},
@@ -705,6 +730,9 @@ var awsPartition = partition{
 				},
 				Deprecated: boxedTrue,
 			},
+			endpointKey{
+				Region: "il-central-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "me-central-1",
 			}: endpoint{},
@@ -752,6 +780,13 @@ var awsPartition = partition{
 				},
 			},
 		},
+		"agreement-marketplace": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+			},
+		},
 		"airflow": service{
 			Endpoints: serviceEndpoints{
 				endpointKey{
@@ -1037,6 +1072,21 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "ca-central-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "api.detective-fips.ca-central-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "ca-central-1-fips",
+			}: endpoint{
+				Hostname: "api.detective-fips.ca-central-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "ca-central-1",
+				},
+				Deprecated: boxedTrue,
+			},
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},
@@ -1055,6 +1105,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "eu-west-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "il-central-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "me-south-1",
 			}: endpoint{},
@@ -1241,6 +1294,14 @@ var awsPartition = partition{
 					Region: "ca-central-1",
 				},
 			},
+			endpointKey{
+				Region: "ca-west-1",
+			}: endpoint{
+				Hostname: "api.ecr.ca-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "ca-west-1",
+				},
+			},
 			endpointKey{
 				Region: "dkr-us-east-1",
 			}: endpoint{
@@ -1812,6 +1873,12 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-northeast-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-northeast-2",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-northeast-3",
+			}: endpoint{},
 			endpointKey{
 				Region: "ap-south-1",
 			}: endpoint{},
@@ -1821,12 +1888,27 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-1",
 			}: endpoint{},
 			endpointKey{
 				Region: "ap-southeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
+			endpointKey{
+				Region: "ca-central-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-north-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-west-3",
+			}: endpoint{},
+			endpointKey{
+				Region: "sa-east-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "us-east-1",
 			}: endpoint{},
@@ -1850,6 +1932,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-south-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-central-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "us-east-1",
 			}: endpoint{},
@@ -1901,6 +1986,9 @@
endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2199,6 +2287,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "apigateway-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2232,6 +2329,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -2390,6 +2496,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2478,6 +2587,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2563,21 +2675,81 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "appflow-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-2.amazonaws.com", + }, }, }, "application-autoscaling": service{ @@ -2623,6 +2795,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2922,6 +3097,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "appmesh.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.il-central-1.api.aws", + }, 
endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3067,6 +3251,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3079,6 +3266,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -3257,6 +3450,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3284,6 +3480,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -3396,6 +3595,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -3664,6 +3866,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -3860,6 +4071,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -3884,6 +4110,60 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "autoscaling-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "autoscaling-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", }: endpoint{}, @@ -3899,15 +4179,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + }, }, }, "autoscaling-plans": service{ @@ -4047,6 +4351,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4268,6 +4575,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4328,6 +4638,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4375,6 +4688,137 @@ var awsPartition = partition{ }, }, }, + "bedrock": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "bedrock-ap-northeast-1", + }: endpoint{ + Hostname: "bedrock.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "bedrock-ap-southeast-1", + }: endpoint{ + Hostname: "bedrock.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "bedrock-eu-central-1", + }: endpoint{ + Hostname: "bedrock.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "bedrock-fips-us-east-1", + }: endpoint{ + Hostname: "bedrock-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-fips-us-west-2", + }: endpoint{ + Hostname: "bedrock-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-northeast-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-southeast-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-east-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-west-2", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: 
"bedrock-runtime-us-east-1", + }: endpoint{ + Hostname: "bedrock-runtime.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-west-2", + }: endpoint{ + Hostname: "bedrock-runtime.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-us-east-1", + }: endpoint{ + Hostname: "bedrock.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-us-west-2", + }: endpoint{ + Hostname: "bedrock.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "billingconductor": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -4677,6 +5121,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4741,6 +5188,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4774,6 +5230,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -4929,6 +5394,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -5088,6 +5556,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -5219,6 +5690,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -5678,6 +6152,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -5799,6 +6276,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -5957,15 +6437,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -5987,6 +6479,9 @@ var awsPartition = 
partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6041,6 +6536,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6243,12 +6744,18 @@ var awsPartition = partition{ }, "cognito-identity": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -6258,6 +6765,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6315,6 +6825,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6361,12 +6874,18 @@ var awsPartition = partition{ }, "cognito-idp": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -6376,6 +6895,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6433,6 +6955,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6724,6 +7249,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -6740,6 +7273,22 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -6756,6 +7305,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -6772,6 +7329,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -6796,6 +7361,22 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + 
endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "compute-optimizer.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "compute-optimizer.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -6884,6 +7465,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -7066,6 +7650,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -7161,6 +7748,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7170,6 +7760,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7191,12 +7784,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7206,6 +7805,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7286,6 +7891,18 @@ var awsPartition = partition{ }, }, }, + "cost-optimization-hub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "cost-optimization-hub.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "cur": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -7822,6 +8439,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "datasync-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -7855,6 +8481,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -7891,6 +8526,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -7938,6 +8576,190 @@ var awsPartition = partition{ }, }, }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"af-south-1", + }: endpoint{ + Hostname: "datazone.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "datazone.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "datazone.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "datazone.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "datazone.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "datazone.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "datazone.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "datazone.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "datazone.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "datazone.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "datazone.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "datazone.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "datazone.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "datazone.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "datazone.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "datazone.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "datazone.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "datazone.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "datazone.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "datazone.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "datazone.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "datazone.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "datazone.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "datazone.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "datazone.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "datazone.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "datazone.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "datazone.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "datazone.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-west-2.amazonaws.com", + }, + }, + }, "dax": 
service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -8158,6 +8980,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -8331,6 +9156,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -8355,6 +9183,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8416,6 +9247,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "dms", }: endpoint{ @@ -8731,6 +9565,45 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "drs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "drs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "drs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "drs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8743,15 +9616,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-west-2.amazonaws.com", + }, }, }, "ds": service{ @@ -8798,6 +9695,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ds-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -8831,6 +9737,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -8975,6 +9890,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9138,6 +10071,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ebs-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9171,6 +10113,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -9499,6 +10450,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9680,6 +10634,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9790,6 +10747,166 @@ var awsPartition = partition{ }, }, }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "eks-auth.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "eks-auth.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "eks-auth.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "eks-auth.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "eks-auth.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "eks-auth.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "eks-auth.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "eks-auth.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "eks-auth.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: 
endpoint{ + Hostname: "eks-auth.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "eks-auth.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "eks-auth.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "eks-auth.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "eks-auth.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "eks-auth.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "eks-auth.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "eks-auth.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "eks-auth.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "eks-auth.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "eks-auth.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "eks-auth.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "eks-auth.us-west-2.api.aws", + }, + }, + }, "elasticache": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -9828,6 +10945,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -10442,6 +11562,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-central-1", }: endpoint{ @@ -10505,6 +11634,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10613,6 +11751,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -10773,6 +11914,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -10808,6 +11958,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -10871,6 +12030,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -10954,105 +12119,159 @@ var awsPartition = 
partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "email-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "email-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "email-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "email-fips.us-west-2.amazonaws.com", - }, - }, - }, - "emr-containers": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ - Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", + Hostname: "email-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "email-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "email-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + 
Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-2.amazonaws.com", + }, + }, + }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", }, endpointKey{ Region: "eu-central-1", @@ -11117,6 +12336,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -11163,6 +12385,9 @@ var awsPartition = partition{ }, "emr-serverless": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -11172,6 +12397,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -11181,6 +12409,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11196,6 +12427,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -11250,6 +12484,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -11313,63 +12550,192 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-1.api.aws", + }, endpointKey{ Region: 
"ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-south-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-south-2", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-3.api.aws", + }, endpointKey{ Region: "fips", }: endpoint{ @@ -11382,18 +12748,48 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: 
endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -11412,6 +12808,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -11430,6 +12832,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -11448,6 +12856,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -11503,6 +12917,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11664,12 +13081,27 @@ var awsPartition = partition{ }, "finspace": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -11738,6 +13170,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -12164,6 +13599,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -12561,6 +13999,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -12765,16 +14206,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "gamesparks": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "geo": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -13459,6 +14890,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -13706,6 +15140,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", 
}: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -13727,12 +15164,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -13748,6 +15191,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -13757,6 +15209,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -13986,6 +15441,9 @@ var awsPartition = partition{ }, "inspector2": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -13995,6 +15453,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -14004,12 +15465,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -14025,6 +15492,42 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "inspector2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "inspector2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "inspector2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "inspector2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -14034,15 +15537,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-west-2.amazonaws.com", + }, }, }, "internetmonitor": service{ @@ -14122,7 +15649,12 @@ var awsPartition = partition{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.ca-central-1.api.aws", + Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "internetmonitor.ca-west-1.api.aws", }, endpointKey{ Region: "eu-central-1", @@ -14193,7 +15725,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.us-east-1.api.aws", + Hostname: "internetmonitor-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", @@ -14204,7 +15736,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.us-east-2.api.aws", + Hostname: "internetmonitor-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", @@ -14215,7 +15747,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.us-west-1.api.aws", + Hostname: "internetmonitor-fips.us-west-1.amazonaws.com", }, endpointKey{ Region: "us-west-2", @@ -14226,7 +15758,7 @@ var awsPartition = partition{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.us-west-2.api.aws", + Hostname: "internetmonitor-fips.us-west-2.amazonaws.com", }, }, }, @@ -14945,12 +16477,45 @@ var awsPartition = partition{ }, "iottwinmaker": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "api-ap-northeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "api-ap-northeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "api-ap-south-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "api-ap-southeast-1", }: endpoint{ @@ -14999,6 +16564,30 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "data-ap-northeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "data-ap-northeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "data-ap-south-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "data-ap-southeast-1", }: endpoint{ @@ -15355,6 +16944,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -15612,6 +17204,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: 
"kendra-ranking-fips.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{ @@ -15740,6 +17337,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -15912,6 +17512,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -16214,6 +17817,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -16762,6 +18383,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -16988,6 +18618,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -16997,18 +18630,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -17054,6 +18696,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17136,6 +18784,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17196,6 +18847,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -17263,24 +18917,39 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, 
+ endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -17326,6 +18995,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17454,6 +19129,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17635,12 +19313,18 @@ var awsPartition = partition{ }, "m2": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -17660,6 +19344,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -17742,46 +19432,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "macie": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", - }, - }, - }, "macie2": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -17865,6 +19515,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17931,6 +19584,13 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "managedblockchain-query": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, "marketplacecommerceanalytics": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -18004,12 +19664,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18028,6 +19694,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -18056,6 +19725,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -18065,6 +19737,9 @@ var awsPartition = partition{ endpointKey{ 
Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18274,6 +19949,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -18283,6 +19961,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18326,6 +20007,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -18335,6 +20019,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18378,6 +20065,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -18387,6 +20077,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18455,12 +20148,33 @@ var awsPartition = partition{ }, "meetings-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -18723,6 +20437,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18896,6 +20613,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -19153,6 +20873,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19361,6 +21084,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -19682,6 +21408,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -19741,6 +21470,24 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "networkmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "networkmanager-fips.us-west-2.amazonaws.com", 
+ CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, "nimble": service{ @@ -19818,6 +21565,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19958,6 +21708,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "oidc.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -19998,6 +21756,22 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "oidc.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "oidc.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -20100,6 +21874,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "omics.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "us-east-1", }: endpoint{ @@ -20253,12 +22035,21 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -20383,6 +22174,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -20566,6 +22363,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -20775,6 +22575,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -20790,12 +22593,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -21042,6 +22851,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "portal.sso.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -21082,6 +22899,22 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "portal.sso.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + 
Hostname: "portal.sso.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -21273,6 +23106,166 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "qbusiness.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "qbusiness.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "qbusiness.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "qbusiness.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "qbusiness.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "qbusiness.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "qbusiness.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "qbusiness.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "qbusiness.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "qbusiness.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "qbusiness.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "qbusiness.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "qbusiness.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "qbusiness.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "qbusiness.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "qbusiness.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "qbusiness.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "qbusiness.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "qbusiness.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "qbusiness.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "qbusiness.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: 
"qbusiness.us-west-2.api.aws", + }, + }, + }, "qldb": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -21463,6 +23456,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ram-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21496,6 +23498,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ram-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -21626,6 +23637,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "rbin-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21659,6 +23679,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "rbin-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -21798,6 +23827,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21840,6 +23887,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "rds-fips.ca-west-1", + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "rds-fips.us-east-1", }: endpoint{ @@ -21894,6 +23950,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "rds.ca-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "rds.us-east-1", }: endpoint{ @@ -22196,6 +24270,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "redshift-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -22229,6 +24312,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "redshift-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: 
"fips-us-east-1", }: endpoint{ @@ -22408,6 +24500,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "rekognition-fips.ca-central-1", }: endpoint{ @@ -22694,6 +24789,16 @@ var awsPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{ @@ -22729,6 +24834,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.ap-southeast-2.api.aws", }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "resource-explorer-2.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{ @@ -22754,6 +24864,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.eu-north-1.api.aws", }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -22774,6 +24889,16 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.il-central-1.api.aws", }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "resource-explorer-2.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -22839,6 +24964,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -22994,6 +25122,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23003,18 +25134,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23024,6 +25164,48 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -23033,15 +25215,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", + }, }, }, "route53": service{ @@ -23138,6 +25344,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -23378,6 +25587,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -23655,6 +25867,27 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -23740,6 +25973,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "s3-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -24433,55 +26675,123 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: 
dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -24512,40 +26822,87 @@ var awsPartition = partition{ Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, }, }, "sagemaker-geospatial": service{ @@ -24656,6 +27013,9 @@ var awsPartition = partition{ }, "schemas": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -24665,6 +27025,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -24674,15 +27037,27 @@ 
var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24692,6 +27067,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -24750,160 +27131,288 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-south-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-4", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1-fips", }: endpoint{ - Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com", + + Deprecated: boxedTrue, }, endpointKey{ - Region: "ca-central-1-fips", + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1-fips", }: endpoint{ - Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, 
endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-south-2", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1-fips", }: endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-2", Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-2-fips", }: endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-1", Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-1-fips", }: endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + 
Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2-fips", }: endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + Deprecated: boxedTrue, }, }, @@ -24946,6 +27455,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25006,6 +27518,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -25061,6 +27576,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -25070,30 +27588,99 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "securitylake-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "securitylake-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "securitylake-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "securitylake-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-west-2.amazonaws.com", + }, }, }, "serverlessrepo": service{ @@ -25257,6 +27844,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -25384,6 +27974,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25453,6 +28046,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, 
+ endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -25904,6 +28500,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25928,6 +28527,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -26151,6 +28753,38 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-verification-us-east-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-east-2", + }: endpoint{ + Hostname: "verification.signer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "fips-verification-us-west-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-west-2", + }: endpoint{ + Hostname: "verification.signer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -26193,6 +28827,166 @@ var awsPartition = partition{ }: endpoint{ Hostname: "signer-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "verification-af-south-1", + }: endpoint{ + Hostname: "verification.signer.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "verification-ap-east-1", + }: endpoint{ + Hostname: "verification.signer.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "verification-ap-northeast-1", + }: endpoint{ + Hostname: "verification.signer.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "verification-ap-northeast-2", + }: endpoint{ + Hostname: "verification.signer.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "verification-ap-south-1", + }: endpoint{ + Hostname: "verification.signer.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "verification-ap-southeast-1", + }: endpoint{ + Hostname: "verification.signer.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "verification-ap-southeast-2", + }: endpoint{ + Hostname: "verification.signer.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "verification-ca-central-1", + }: endpoint{ + Hostname: "verification.signer.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "verification-eu-central-1", + }: endpoint{ + Hostname: "verification.signer.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "verification-eu-north-1", + }: endpoint{ + Hostname: 
"verification.signer.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "verification-eu-south-1", + }: endpoint{ + Hostname: "verification.signer.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "verification-eu-west-1", + }: endpoint{ + Hostname: "verification.signer.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "verification-eu-west-2", + }: endpoint{ + Hostname: "verification.signer.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "verification-eu-west-3", + }: endpoint{ + Hostname: "verification.signer.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "verification-me-south-1", + }: endpoint{ + Hostname: "verification.signer.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "verification-sa-east-1", + }: endpoint{ + Hostname: "verification.signer.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "verification-us-east-1", + }: endpoint{ + Hostname: "verification.signer.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "verification-us-east-2", + }: endpoint{ + Hostname: "verification.signer.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "verification-us-west-1", + }: endpoint{ + Hostname: "verification.signer.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "verification-us-west-2", + }: endpoint{ + Hostname: "verification.signer.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "simspaceweaver": service{ @@ -26304,6 +29098,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -26677,6 +29474,15 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -26701,6 +29507,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -26831,6 +29646,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -26988,6 +29806,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ssm-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: 
endpoint{}, @@ -27021,6 +29848,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -27378,15 +30214,157 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", - }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, 
+ endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -27403,143 +30381,10 @@ var awsPartition = partition{ Region: "eu-west-3", }: endpoint{}, endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", - }, - }, - }, - "sso": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", + Region: "il-central-1", }: endpoint{}, endpointKey{ - Region: "eu-west-3", + Region: "me-central-1", }: endpoint{}, endpointKey{ Region: "me-south-1", @@ -27599,6 +30444,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -27786,6 +30634,9 @@ var awsPartition = partition{ endpointKey{ Region: 
"eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -27915,6 +30766,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -28021,6 +30875,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -28195,6 +31052,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -28344,152 +31204,158 @@ var awsPartition = partition{ Region: "ca-central-1", }: endpoint{}, endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "synthetics-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "synthetics-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "synthetics-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "synthetics-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-west-2.amazonaws.com", - }, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, 
- endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + }, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", @@ -28659,6 +31525,65 @@ var awsPartition = 
partition{ }, }, }, + "thinclient": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "tnb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "transcribe": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -29073,6 +31998,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -29318,6 +32246,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -29467,9 +32398,21 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -31067,9 +34010,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -31091,9 +34043,18 @@ var awsPartition = partition{ endpointKey{ Region: "ui-ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ui-ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ui-ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ui-ca-central-1", + }: endpoint{}, endpointKey{ Region: "ui-eu-central-1", }: endpoint{}, @@ -31242,6 +34203,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -31337,6 +34301,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -31569,6 +34536,20 @@ var awscnPartition = partition{ }, }, }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + 
}, "api.sagemaker": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -31676,6 +34657,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "athena": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -31976,6 +34967,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "datazone.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "datazone.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "dax": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32103,6 +35119,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "eks-auth.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "eks-auth.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "elasticache": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32188,9 +35229,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "emr-containers": service{ @@ -32218,9 +35271,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "events": service{ @@ -32378,6 +35443,16 @@ var awscnPartition = partition{ }, }, }, + "identitystore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "internetmonitor": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -32456,6 +35531,29 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api-cn-north-1", + }: endpoint{ + Hostname: "api.iottwinmaker.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + }: 
endpoint{}, + endpointKey{ + Region: "data-cn-north-1", + }: endpoint{ + Hostname: "data.iottwinmaker.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, "kafka": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32677,6 +35775,26 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "oidc.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -32708,6 +35826,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "polly": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32715,6 +35843,58 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "qbusiness.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "qbusiness.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, "ram": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32755,6 +35935,13 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "redshift-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, "resource-explorer-2": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -32952,14 +36139,32 @@ var awscnPartition = partition{ }, }, }, + "schemas": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{}, }, }, "securityhub": service{ @@ -33046,6 +36251,22 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "verification-cn-north-1", + }: endpoint{ + Hostname: 
"verification.signer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "verification-cn-northwest-1", + }: endpoint{ + Hostname: "verification.signer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "sms": service{ @@ -33136,14 +36357,36 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "states": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "storagegateway": service{ @@ -33917,12 +37160,42 @@ var awsusgovPartition = partition{ }, "appconfigdata": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "appconfigdata.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "appconfigdata.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfigdata.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfigdata.us-gov-west-1.amazonaws.com", + }, }, }, "application-autoscaling": service{ @@ -34057,6 +37330,16 @@ var awsusgovPartition = partition{ }, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "athena": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34240,6 +37523,13 @@ var awsusgovPartition = partition{ }, }, }, + "bedrock": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "cassandra": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34655,6 +37945,13 @@ var awsusgovPartition = partition{ }, }, }, + "codestar-connections": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + }, + }, "cognito-identity": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34998,6 +38295,31 @@ var awsusgovPartition = partition{ }, }, }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "datazone.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "datazone.us-gov-west-1.api.aws", + }, + }, + }, "directconnect": service{ Endpoints: 
serviceEndpoints{ endpointKey{ @@ -35145,6 +38467,46 @@ var awsusgovPartition = partition{ }, }, }, + "drs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "drs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "drs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "ds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35380,6 +38742,31 @@ var awsusgovPartition = partition{ }, }, }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-west-1.api.aws", + }, + }, + }, "elasticache": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -35600,6 +38987,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -35611,6 +39004,13 @@ var awsusgovPartition = partition{ }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -35666,6 +39066,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -35684,6 +39090,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -35920,6 +39332,28 @@ var awsusgovPartition = partition{ }, }, }, + "geo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ 
@@ -35986,21 +39420,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "glue-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "glue-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.api.aws", + }, }, }, "greengrass": service{ @@ -36118,7 +39576,21 @@ var awsusgovPartition = partition{ }, }, "health": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "global.health.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -36319,12 +39791,42 @@ var awsusgovPartition = partition{ }, "inspector2": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com", + }, }, }, "internetmonitor": service{ @@ -36805,21 +40307,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.api.aws", + 
}, }, }, "lambda": service{ @@ -36914,6 +40440,16 @@ var awsusgovPartition = partition{ }, }, }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36954,6 +40490,36 @@ var awsusgovPartition = partition{ }, }, }, + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "managedblockchain": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -37287,6 +40853,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "oidc": service{ @@ -37492,6 +41076,31 @@ var awsusgovPartition = partition{ }, }, }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "qbusiness.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "qbusiness.us-gov-west-1.api.aws", + }, + }, + }, "quicksight": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -37733,6 +41342,46 @@ var awsusgovPartition = partition{ }, }, }, + "resiliencehub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "resource-explorer-2": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -37813,6 +41462,46 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: 
"rolesanywhere-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "route53": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -38173,17 +41862,33 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, }, }, "secretsmanager": service{ @@ -38191,37 +41896,43 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-east-1-fips", }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-west-1-fips", }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, + Deprecated: boxedTrue, }, }, @@ -38411,7 +42122,7 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-gov-east-1.amazonaws.com", + Hostname: "servicediscovery.us-gov-east-1.api.aws", }, endpointKey{ Region: "us-gov-east-1", @@ -38423,7 +42134,7 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", Variant: fipsVariant | dualStackVariant, }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", + Hostname: "servicediscovery-fips.us-gov-east-1.api.aws", }, endpointKey{ Region: "us-gov-east-1-fips", @@ -38441,7 +42152,7 @@ var awsusgovPartition = 
partition{ Region: "us-gov-west-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-gov-west-1.amazonaws.com", + Hostname: "servicediscovery.us-gov-west-1.api.aws", }, endpointKey{ Region: "us-gov-west-1", @@ -38453,7 +42164,7 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", Variant: fipsVariant | dualStackVariant, }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + Hostname: "servicediscovery-fips.us-gov-west-1.api.aws", }, endpointKey{ Region: "us-gov-west-1-fips", @@ -38519,12 +42230,42 @@ var awsusgovPartition = partition{ }, "simspaceweaver": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", + }, }, }, "sms": service{ @@ -38742,6 +42483,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -38750,6 +42509,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "states": service{ @@ -39659,6 +43436,46 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, 
"directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -39781,6 +43598,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "ec2": service{ @@ -39813,6 +43633,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "elasticache": service{ @@ -39879,14 +43702,45 @@ var awsisoPartition = partition{ }, "elasticmapreduce": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + }, }, }, "es": service{ @@ -39938,6 +43792,19 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "health": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -40088,12 +43955,42 @@ var awsisoPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "rbin": service{ @@ -40138,15 +44035,139 @@ var awsisoPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds-fips.us-iso-east-1", + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-iso-west-1", + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-east-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-iso-east-1", @@ -40194,15 +44215,61 @@ var awsisoPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{ Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov", + }, }, }, "secretsmanager": service{ @@ -40428,6 +44495,13 @@ var awsisobPartition = partition{ }, }, }, + "api.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "appconfig": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -40466,6 +44540,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "cloudformation": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -40662,9 +44743,24 @@ var awsisobPartition = partition{ }, "elasticmapreduce": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "es": service{ @@ -40796,11 +44892,33 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "ram": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "rbin": service{ @@ -40827,16 +44945,73 @@ var awsisobPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds-fips.us-isob-east-1", + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-isob-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, }, }, "redshift": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + 
Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "resource-groups": service{ @@ -40867,6 +45042,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "s3": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -40875,9 +45057,30 @@ var awsisobPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "secretsmanager": service{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index d6fa24776c..93bb5de647 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -171,6 +171,12 @@ type envConfig struct { // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // AWS_EC2_METADATA_V1_DISABLED=true + EC2IMDSv1Disabled *bool + // Specifies that SDK clients must resolve a dual-stack endpoint for // services. 
//
@@ -251,6 +257,9 @@ var (
 	ec2IMDSEndpointModeEnvKey = []string{
 		"AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE",
 	}
+	ec2MetadataV1DisabledEnvKey = []string{
+		"AWS_EC2_METADATA_V1_DISABLED",
+	}
 	useCABundleKey = []string{
 		"AWS_CA_BUNDLE",
 	}
@@ -393,6 +402,7 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
 	if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil {
 		return envConfig{}, err
 	}
+	setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, ec2MetadataV1DisabledEnvKey)

 	if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil {
 		return cfg, err
@@ -414,6 +424,24 @@ func setFromEnvVal(dst *string, keys []string) {
 	}
 }

+func setBoolPtrFromEnvVal(dst **bool, keys []string) {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+
+		switch {
+		case strings.EqualFold(value, "false"):
+			*dst = new(bool)
+			**dst = false
+		case strings.EqualFold(value, "true"):
+			*dst = new(bool)
+			**dst = true
+		}
+	}
+}
+
 func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error {
 	for _, k := range keys {
 		value := os.Getenv(k)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 8127c99a9a..3c88dee526 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -779,6 +779,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
 		cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode)
 	}

+	cfg.EC2MetadataEnableFallback = userCfg.EC2MetadataEnableFallback
+	if cfg.EC2MetadataEnableFallback == nil && envCfg.EC2IMDSv1Disabled != nil {
+		cfg.EC2MetadataEnableFallback = aws.Bool(!*envCfg.EC2IMDSv1Disabled)
+	}
+	if cfg.EC2MetadataEnableFallback == nil && sharedCfg.EC2IMDSv1Disabled != nil {
+		cfg.EC2MetadataEnableFallback = aws.Bool(!*sharedCfg.EC2IMDSv1Disabled)
+	}
+
 	cfg.S3UseARNRegion = userCfg.S3UseARNRegion
 	if cfg.S3UseARNRegion == nil {
 		cfg.S3UseARNRegion = &envCfg.S3UseARNRegion
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
index ea3ac0d031..f3ce8183dd 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -80,6 +80,9 @@ const (
 	// EC2 IMDS Endpoint
 	ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint"

+	// EC2 IMDSv1 disable fallback
+	ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled"
+
 	// Use DualStack Endpoint Resolution
 	useDualStackEndpoint = "use_dualstack_endpoint"

@@ -179,6 +182,12 @@ type sharedConfig struct {
 	// ec2_metadata_service_endpoint=http://fd00:ec2::254
 	EC2IMDSEndpoint string

+	// Specifies that IMDS clients should not fall back to IMDSv1 if token
+	// requests fail.
+	//
+	// ec2_metadata_v1_disabled=true
+	EC2IMDSv1Disabled *bool
+
 	// Specifies that SDK clients must resolve a dual-stack endpoint for
 	// services.
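
The merge in mergeConfigSrcs above gives the new flag a clear precedence: an explicit aws.Config value wins, then the AWS_EC2_METADATA_V1_DISABLED environment variable, then ec2_metadata_v1_disabled from the shared config file, with the "disabled" flag inverted into EC2MetadataEnableFallback. A small sketch of the two opt-in routes:

package main

import (
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Environment route, equivalent to ec2_metadata_v1_disabled=true in
	// the shared config file.
	os.Setenv("AWS_EC2_METADATA_V1_DISABLED", "true")

	// Programmatic route; per mergeConfigSrcs this takes precedence over
	// both the environment and the shared config file.
	sess := session.Must(session.NewSession(&aws.Config{
		EC2MetadataEnableFallback: aws.Bool(false), // never fall back to IMDSv1
	}))
	_ = sess
}
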
// @@ -389,8 +398,15 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.Region, section, regionKey) updateString(&cfg.CustomCABundle, section, customCABundleKey) + // we're retaining a behavioral quirk with this field that existed before + // the removal of literal parsing for (aws-sdk-go-v2/#2276): + // - if the key is missing, the config field will not be set + // - if the key is set to a non-numeric, the config field will be set to 0 if section.Has(roleDurationSecondsKey) { - d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + var d time.Duration + if v, ok := section.Int(roleDurationSecondsKey); ok { + d = time.Duration(v) * time.Second + } cfg.AssumeRoleDuration = &d } @@ -427,6 +443,7 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e ec2MetadataServiceEndpointModeKey, file.Filename, err) } updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) + updateBoolPtr(&cfg.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey) updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint) @@ -668,7 +685,10 @@ func updateBool(dst *bool, section ini.Section, key string) { if !section.Has(key) { return } - *dst = section.Bool(key) + + // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false + v, _ := section.Bool(key) + *dst = v } // updateBoolPtr will only update the dst with the value in the section key, @@ -677,8 +697,11 @@ func updateBoolPtr(dst **bool, section ini.Section, key string) { if !section.Has(key) { return } + + // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false + v, _ := section.Bool(key) *dst = new(bool) - **dst = section.Bool(key) + **dst = v } // SharedConfigLoadError is an error for the shared config file failed to load. 
@@ -805,7 +828,8 @@ func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section i return } - if section.Bool(key) { + // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { *dst = endpoints.DualStackEndpointStateEnabled } else { *dst = endpoints.DualStackEndpointStateDisabled @@ -821,7 +845,8 @@ func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section return } - if section.Bool(key) { + // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { *dst = endpoints.FIPSEndpointStateEnabled } else { *dst = endpoints.FIPSEndpointStateDisabled diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 41386bab12..b542df9315 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -125,6 +125,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Expected-Bucket-Owner": struct{}{}, "X-Amz-Grant-Full-control": struct{}{}, "X-Amz-Grant-Read": struct{}{}, "X-Amz-Grant-Read-Acp": struct{}{}, @@ -135,6 +136,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Request-Payer": struct{}{}, "X-Amz-Server-Side-Encryption": struct{}{}, "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Context": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 0e5f95c1c1..4885dbc909 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.317" +const SDKVersion = "1.50.8" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go index 34a481afbd..b1b686086a 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -154,11 +154,11 @@ func (v ValueType) String() string { // ValueType enums const ( NoneType = ValueType(iota) - DecimalType - IntegerType + DecimalType // deprecated + IntegerType // deprecated StringType QuotedStringType - BoolType + BoolType // deprecated ) // Value is a union container @@ -166,9 +166,9 @@ type Value struct { Type ValueType raw []rune - integer int64 - decimal float64 - boolean bool + integer int64 // deprecated + decimal float64 // deprecated + boolean bool // deprecated str string } @@ -253,24 +253,6 @@ func newLitToken(b []rune) (Token, int, error) { } token = newToken(TokenLit, b[:n], QuotedStringType) - } else if isNumberValue(b) { - var base int - base, n, err = getNumericalValue(b) - if err != nil { - return token, 0, err - } - 
- value := b[:n] - vType := IntegerType - if contains(value, '.') || hasExponent(value) { - vType = DecimalType - } - token = newToken(TokenLit, value, vType) - token.base = base - } else if isBoolValue(b) { - n, err = getBoolValue(b) - - token = newToken(TokenLit, b[:n], BoolType) } else { n, err = getValue(b) token = newToken(TokenLit, b[:n], StringType) @@ -280,18 +262,33 @@ func newLitToken(b []rune) (Token, int, error) { } // IntValue returns an integer value -func (v Value) IntValue() int64 { - return v.integer +func (v Value) IntValue() (int64, bool) { + i, err := strconv.ParseInt(string(v.raw), 0, 64) + if err != nil { + return 0, false + } + return i, true } // FloatValue returns a float value -func (v Value) FloatValue() float64 { - return v.decimal +func (v Value) FloatValue() (float64, bool) { + f, err := strconv.ParseFloat(string(v.raw), 64) + if err != nil { + return 0, false + } + return f, true } // BoolValue returns a bool value -func (v Value) BoolValue() bool { - return v.boolean +func (v Value) BoolValue() (bool, bool) { + // we don't use ParseBool as it recognizes more than what we've + // historically supported + if isCaselessLitValue(runesTrue, v.raw) { + return true, true + } else if isCaselessLitValue(runesFalse, v.raw) { + return false, true + } + return false, false } func isTrimmable(r rune) bool { diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go index 081cf43342..1d08e138ab 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -145,17 +145,17 @@ func (t Section) ValueType(k string) (ValueType, bool) { } // Bool returns a bool value at k -func (t Section) Bool(k string) bool { +func (t Section) Bool(k string) (bool, bool) { return t.values[k].BoolValue() } // Int returns an integer value at k -func (t Section) Int(k string) int64 { +func (t Section) Int(k string) (int64, bool) { return t.values[k].IntValue() } // Float64 returns a float value at k -func (t Section) Float64(k string) float64 { +func (t Section) Float64(k string) (float64, bool) { return t.values[k].FloatValue() } diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go index b4d7de3c3d..8d4a594114 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -2411,6 +2411,12 @@ func (c *AutoScaling) DescribeInstanceRefreshesRequest(input *DescribeInstanceRe Name: opDescribeInstanceRefreshes, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -2425,7 +2431,7 @@ func (c *AutoScaling) DescribeInstanceRefreshesRequest(input *DescribeInstanceRe // DescribeInstanceRefreshes API operation for Auto Scaling. // // Gets information about the instance refreshes for the specified Auto Scaling -// group. +// group from the previous six weeks. 
// // This operation is part of the instance refresh feature (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html) // in Amazon EC2 Auto Scaling, which helps you update instances in your Auto @@ -2477,6 +2483,57 @@ func (c *AutoScaling) DescribeInstanceRefreshesWithContext(ctx aws.Context, inpu return out, req.Send() } +// DescribeInstanceRefreshesPages iterates over the pages of a DescribeInstanceRefreshes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceRefreshes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceRefreshes operation. +// pageNum := 0 +// err := client.DescribeInstanceRefreshesPages(params, +// func(page *autoscaling.DescribeInstanceRefreshesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *AutoScaling) DescribeInstanceRefreshesPages(input *DescribeInstanceRefreshesInput, fn func(*DescribeInstanceRefreshesOutput, bool) bool) error { + return c.DescribeInstanceRefreshesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstanceRefreshesPagesWithContext same as DescribeInstanceRefreshesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeInstanceRefreshesPagesWithContext(ctx aws.Context, input *DescribeInstanceRefreshesInput, fn func(*DescribeInstanceRefreshesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstanceRefreshesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceRefreshesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstanceRefreshesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeLaunchConfigurations = "DescribeLaunchConfigurations" // DescribeLaunchConfigurationsRequest generates a "aws/request.Request" representing the @@ -2812,6 +2869,12 @@ func (c *AutoScaling) DescribeLoadBalancerTargetGroupsRequest(input *DescribeLoa Name: opDescribeLoadBalancerTargetGroups, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -2899,6 +2962,57 @@ func (c *AutoScaling) DescribeLoadBalancerTargetGroupsWithContext(ctx aws.Contex return out, req.Send() } +// DescribeLoadBalancerTargetGroupsPages iterates over the pages of a DescribeLoadBalancerTargetGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLoadBalancerTargetGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
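
With the Paginator wired into the request above, the generated Pages helpers drive NextToken handling for the caller. A usage sketch for the instance-refresh paginator, assuming an existing default session; the group name is a placeholder:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	client := autoscaling.New(sess)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	err := client.DescribeInstanceRefreshesPagesWithContext(ctx,
		&autoscaling.DescribeInstanceRefreshesInput{
			AutoScalingGroupName: aws.String("my-asg"), // placeholder
		},
		func(page *autoscaling.DescribeInstanceRefreshesOutput, lastPage bool) bool {
			for _, r := range page.InstanceRefreshes {
				fmt.Println(aws.StringValue(r.InstanceRefreshId), aws.StringValue(r.Status))
			}
			return true // keep paging until lastPage
		})
	if err != nil {
		panic(err)
	}
}
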
+// +// // Example iterating over at most 3 pages of a DescribeLoadBalancerTargetGroups operation. +// pageNum := 0 +// err := client.DescribeLoadBalancerTargetGroupsPages(params, +// func(page *autoscaling.DescribeLoadBalancerTargetGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *AutoScaling) DescribeLoadBalancerTargetGroupsPages(input *DescribeLoadBalancerTargetGroupsInput, fn func(*DescribeLoadBalancerTargetGroupsOutput, bool) bool) error { + return c.DescribeLoadBalancerTargetGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLoadBalancerTargetGroupsPagesWithContext same as DescribeLoadBalancerTargetGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeLoadBalancerTargetGroupsPagesWithContext(ctx aws.Context, input *DescribeLoadBalancerTargetGroupsInput, fn func(*DescribeLoadBalancerTargetGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLoadBalancerTargetGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLoadBalancerTargetGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLoadBalancerTargetGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeLoadBalancers = "DescribeLoadBalancers" // DescribeLoadBalancersRequest generates a "aws/request.Request" representing the @@ -2929,6 +3043,12 @@ func (c *AutoScaling) DescribeLoadBalancersRequest(input *DescribeLoadBalancersI Name: opDescribeLoadBalancers, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -3016,6 +3136,57 @@ func (c *AutoScaling) DescribeLoadBalancersWithContext(ctx aws.Context, input *D return out, req.Send() } +// DescribeLoadBalancersPages iterates over the pages of a DescribeLoadBalancers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLoadBalancers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLoadBalancers operation. +// pageNum := 0 +// err := client.DescribeLoadBalancersPages(params, +// func(page *autoscaling.DescribeLoadBalancersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *AutoScaling) DescribeLoadBalancersPages(input *DescribeLoadBalancersInput, fn func(*DescribeLoadBalancersOutput, bool) bool) error { + return c.DescribeLoadBalancersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLoadBalancersPagesWithContext same as DescribeLoadBalancersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeLoadBalancersPagesWithContext(ctx aws.Context, input *DescribeLoadBalancersInput, fn func(*DescribeLoadBalancersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLoadBalancersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLoadBalancersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLoadBalancersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMetricCollectionTypes = "DescribeMetricCollectionTypes" // DescribeMetricCollectionTypesRequest generates a "aws/request.Request" representing the @@ -6341,10 +6512,7 @@ func (c *AutoScaling) StartInstanceRefreshRequest(input *StartInstanceRefreshInp // StartInstanceRefresh API operation for Auto Scaling. // -// Starts an instance refresh. During an instance refresh, Amazon EC2 Auto Scaling -// performs a rolling update of instances in an Auto Scaling group. Instances -// are terminated first and then replaced, which temporarily reduces the capacity -// available within your Auto Scaling group. +// Starts an instance refresh. // // This operation is part of the instance refresh feature (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html) // in Amazon EC2 Auto Scaling, which helps you update instances in your Auto @@ -8131,6 +8299,11 @@ type CreateAutoScalingGroupInput struct { // in the Amazon EC2 Auto Scaling User Guide. InstanceId *string `min:"1" type:"string"` + // An instance maintenance policy. For more information, see Set instance maintenance + // policy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-maintenance-policy.html) + // in the Amazon EC2 Auto Scaling User Guide. + InstanceMaintenancePolicy *InstanceMaintenancePolicy `type:"structure"` + // The name of the launch configuration to use to launch instances. // // Conditional: You must specify either a launch template (LaunchTemplate or @@ -8309,6 +8482,11 @@ func (s *CreateAutoScalingGroupInput) Validate() error { if s.VPCZoneIdentifier != nil && len(*s.VPCZoneIdentifier) < 1 { invalidParams.Add(request.NewErrParamMinLen("VPCZoneIdentifier", 1)) } + if s.InstanceMaintenancePolicy != nil { + if err := s.InstanceMaintenancePolicy.Validate(); err != nil { + invalidParams.AddNested("InstanceMaintenancePolicy", err.(request.ErrInvalidParams)) + } + } if s.LaunchTemplate != nil { if err := s.LaunchTemplate.Validate(); err != nil { invalidParams.AddNested("LaunchTemplate", err.(request.ErrInvalidParams)) @@ -8422,6 +8600,12 @@ func (s *CreateAutoScalingGroupInput) SetInstanceId(v string) *CreateAutoScaling return s } +// SetInstanceMaintenancePolicy sets the InstanceMaintenancePolicy field's value. +func (s *CreateAutoScalingGroupInput) SetInstanceMaintenancePolicy(v *InstanceMaintenancePolicy) *CreateAutoScalingGroupInput { + s.InstanceMaintenancePolicy = v + return s +} + // SetLaunchConfigurationName sets the LaunchConfigurationName field's value. 
func (s *CreateAutoScalingGroupInput) SetLaunchConfigurationName(v string) *CreateAutoScalingGroupInput { s.LaunchConfigurationName = &v @@ -13434,6 +13618,9 @@ type Group struct { // HealthCheckType is a required field HealthCheckType *string `min:"1" type:"string" required:"true"` + // An instance maintenance policy. + InstanceMaintenancePolicy *InstanceMaintenancePolicy `type:"structure"` + // The EC2 instances associated with the group. Instances []*Instance `type:"list"` @@ -13603,6 +13790,12 @@ func (s *Group) SetHealthCheckType(v string) *Group { return s } +// SetInstanceMaintenancePolicy sets the InstanceMaintenancePolicy field's value. +func (s *Group) SetInstanceMaintenancePolicy(v *InstanceMaintenancePolicy) *Group { + s.InstanceMaintenancePolicy = v + return s +} + // SetInstances sets the Instances field's value. func (s *Group) SetInstances(v []*Instance) *Group { s.Instances = v @@ -13993,6 +14186,78 @@ func (s *InstanceDetails) SetWeightedCapacity(v string) *InstanceDetails { return s } +// Describes an instance maintenance policy. +// +// For more information, see Set instance maintenance policy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-maintenance-policy.html) +// in the Amazon EC2 Auto Scaling User Guide. +type InstanceMaintenancePolicy struct { + _ struct{} `type:"structure"` + + // Specifies the upper threshold as a percentage of the desired capacity of + // the Auto Scaling group. It represents the maximum percentage of the group + // that can be in service and healthy, or pending, to support your workload + // when replacing instances. Value range is 100 to 200. To clear a previously + // set value, specify a value of -1. + // + // Both MinHealthyPercentage and MaxHealthyPercentage must be specified, and + // the difference between them cannot be greater than 100. A large range increases + // the number of instances that can be replaced at the same time. + MaxHealthyPercentage *int64 `type:"integer"` + + // Specifies the lower threshold as a percentage of the desired capacity of + // the Auto Scaling group. It represents the minimum percentage of the group + // to keep in service, healthy, and ready to use to support your workload when + // replacing instances. Value range is 0 to 100. To clear a previously set value, + // specify a value of -1. + MinHealthyPercentage *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InstanceMaintenancePolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InstanceMaintenancePolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
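
Client-side, the Validate method below enforces only the -1 floor; the documented 0 to 100 and 100 to 200 ranges and the min/max pairing rule are checked by the service. A sketch of attaching the new policy to an existing group (the group name is a placeholder; the matching UpdateAutoScalingGroupInput field appears further down in this diff):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	client := autoscaling.New(sess)

	// Keep at least 90% of desired capacity in service, and allow the
	// group to grow to 120% while instances are being replaced.
	policy := &autoscaling.InstanceMaintenancePolicy{}
	policy.SetMinHealthyPercentage(90).SetMaxHealthyPercentage(120)

	_, err := client.UpdateAutoScalingGroup(&autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName:      aws.String("my-asg"), // placeholder
		InstanceMaintenancePolicy: policy,
	})
	if err != nil {
		panic(err)
	}
}
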
+func (s *InstanceMaintenancePolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InstanceMaintenancePolicy"} + if s.MaxHealthyPercentage != nil && *s.MaxHealthyPercentage < -1 { + invalidParams.Add(request.NewErrParamMinValue("MaxHealthyPercentage", -1)) + } + if s.MinHealthyPercentage != nil && *s.MinHealthyPercentage < -1 { + invalidParams.Add(request.NewErrParamMinValue("MinHealthyPercentage", -1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxHealthyPercentage sets the MaxHealthyPercentage field's value. +func (s *InstanceMaintenancePolicy) SetMaxHealthyPercentage(v int64) *InstanceMaintenancePolicy { + s.MaxHealthyPercentage = &v + return s +} + +// SetMinHealthyPercentage sets the MinHealthyPercentage field's value. +func (s *InstanceMaintenancePolicy) SetMinHealthyPercentage(v int64) *InstanceMaintenancePolicy { + s.MinHealthyPercentage = &v + return s +} + // The metadata options for the instances. For more information, see Configuring // the Instance Metadata Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) // in the Amazon EC2 Auto Scaling User Guide. @@ -14590,6 +14855,31 @@ type InstanceRequirements struct { // Default: Any local storage type LocalStorageTypes []*string `type:"list" enum:"LocalStorageType"` + // [Price protection] The price protection threshold for Spot Instances, as + // a percentage of an identified On-Demand price. The identified On-Demand price + // is the price of the lowest priced current generation C, M, or R instance + // type with your specified attributes. If no current generation C, M, or R + // instance type matches your attributes, then the identified price is from + // either the lowest priced current generation instance types or, failing that, + // the lowest priced previous generation instance types that match your attributes. + // When Amazon EC2 Auto Scaling selects instance types with your attributes, + // we will exclude instance types whose price exceeds your specified threshold. + // + // The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets + // as a percentage. + // + // To indicate no price protection threshold, specify a high value, such as + // 999999. + // + // If you set DesiredCapacityType to vcpu or memory-mib, the price protection + // threshold is based on the per-vCPU or per-memory price instead of the per + // instance price. + // + // Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice + // can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice + // is used and the value for that parameter defaults to 100. + MaxSpotPriceAsPercentageOfOptimalOnDemandPrice *int64 `type:"integer"` + // The minimum and maximum amount of memory per vCPU for an instance type, in // GiB. // @@ -14612,17 +14902,24 @@ type InstanceRequirements struct { // Default: No minimum or maximum limits NetworkInterfaceCount *NetworkInterfaceCountRequest `type:"structure"` - // The price protection threshold for On-Demand Instances. This is the maximum - // you’ll pay for an On-Demand Instance, expressed as a percentage higher - // than the least expensive current generation M, C, or R instance type with - // your specified attributes. When Amazon EC2 Auto Scaling selects instance - // types with your attributes, we will exclude instance types whose price is - // higher than your threshold. 
The parameter accepts an integer, which Amazon - // EC2 Auto Scaling interprets as a percentage. To turn off price protection, - // specify a high value, such as 999999. + // [Price protection] The price protection threshold for On-Demand Instances, + // as a percentage higher than an identified On-Demand price. The identified + // On-Demand price is the price of the lowest priced current generation C, M, + // or R instance type with your specified attributes. If no current generation + // C, M, or R instance type matches your attributes, then the identified price + // is from either the lowest priced current generation instance types or, failing + // that, the lowest priced previous generation instance types that match your + // attributes. When Amazon EC2 Auto Scaling selects instance types with your + // attributes, we will exclude instance types whose price exceeds your specified + // threshold. + // + // The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets + // as a percentage. + // + // To turn off price protection, specify a high value, such as 999999. // // If you set DesiredCapacityType to vcpu or memory-mib, the price protection - // threshold is applied based on the per vCPU or per memory price instead of + // threshold is applied based on the per-vCPU or per-memory price instead of // the per instance price. // // Default: 20 @@ -14634,18 +14931,27 @@ type InstanceRequirements struct { // Default: false RequireHibernateSupport *bool `type:"boolean"` - // The price protection threshold for Spot Instances. This is the maximum you’ll - // pay for a Spot Instance, expressed as a percentage higher than the least - // expensive current generation M, C, or R instance type with your specified - // attributes. When Amazon EC2 Auto Scaling selects instance types with your - // attributes, we will exclude instance types whose price is higher than your - // threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling - // interprets as a percentage. To turn off price protection, specify a high - // value, such as 999999. + // [Price protection] The price protection threshold for Spot Instances, as + // a percentage higher than an identified Spot price. The identified Spot price + // is the price of the lowest priced current generation C, M, or R instance + // type with your specified attributes. If no current generation C, M, or R + // instance type matches your attributes, then the identified price is from + // either the lowest priced current generation instance types or, failing that, + // the lowest priced previous generation instance types that match your attributes. + // When Amazon EC2 Auto Scaling selects instance types with your attributes, + // we will exclude instance types whose price exceeds your specified threshold. + // + // The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets + // as a percentage. + // + // To turn off price protection, specify a high value, such as 999999. // // If you set DesiredCapacityType to vcpu or memory-mib, the price protection - // threshold is applied based on the per vCPU or per memory price instead of - // the per instance price. + // threshold is based on the per-vCPU or per-memory price instead of the per + // instance price. + // + // Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice + // can be specified. 
// // Default: 100 SpotMaxPricePercentageOverLowestPrice *int64 `type:"integer"` @@ -14790,6 +15096,12 @@ func (s *InstanceRequirements) SetLocalStorageTypes(v []*string) *InstanceRequir return s } +// SetMaxSpotPriceAsPercentageOfOptimalOnDemandPrice sets the MaxSpotPriceAsPercentageOfOptimalOnDemandPrice field's value. +func (s *InstanceRequirements) SetMaxSpotPriceAsPercentageOfOptimalOnDemandPrice(v int64) *InstanceRequirements { + s.MaxSpotPriceAsPercentageOfOptimalOnDemandPrice = &v + return s +} + // SetMemoryGiBPerVCpu sets the MemoryGiBPerVCpu field's value. func (s *InstanceRequirements) SetMemoryGiBPerVCpu(v *MemoryGiBPerVCpuRequest) *InstanceRequirements { s.MemoryGiBPerVCpu = v @@ -16515,8 +16827,8 @@ func (s *MetricGranularityType) SetGranularity(v string) *MetricGranularityType return s } -// This structure defines the CloudWatch metric to return, along with the statistic, -// period, and unit. +// This structure defines the CloudWatch metric to return, along with the statistic +// and unit. // // For more information about the CloudWatch terminology below, see Amazon CloudWatch // concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html) @@ -18829,14 +19141,26 @@ type RefreshPreferences struct { // in all cases), or the HealthCheckGracePeriod property otherwise. InstanceWarmup *int64 `type:"integer"` - // The amount of capacity in the Auto Scaling group that must pass your group's - // health checks to allow the operation to continue. The value is expressed - // as a percentage of the desired capacity of the Auto Scaling group (rounded - // up to the nearest integer). The default is 90. + // Specifies the maximum percentage of the group that can be in service and + // healthy, or pending, to support your workload when replacing instances. The + // value is expressed as a percentage of the desired capacity of the Auto Scaling + // group. Value range is 100 to 200. + // + // If you specify MaxHealthyPercentage, you must also specify MinHealthyPercentage, + // and the difference between them cannot be greater than 100. A larger range + // increases the number of instances that can be replaced at the same time. + // + // If you do not specify this property, the default is 100 percent, or the percentage + // set in the instance maintenance policy for the Auto Scaling group, if defined. + MaxHealthyPercentage *int64 `min:"100" type:"integer"` + + // Specifies the minimum percentage of the group to keep in service, healthy, + // and ready to use to support your workload to allow the operation to continue. + // The value is expressed as a percentage of the desired capacity of the Auto + // Scaling group. Value range is 0 to 100. // - // Setting the minimum healthy percentage to 100 percent limits the rate of - // replacement to one instance at a time. In contrast, setting it to 0 percent - // has the effect of replacing all instances at the same time. + // If you do not specify this property, the default is 90 percent, or the percentage + // set in the instance maintenance policy for the Auto Scaling group, if defined. MinHealthyPercentage *int64 `type:"integer"` // Choose the behavior that you want Amazon EC2 Auto Scaling to use if instances @@ -18910,6 +19234,19 @@ func (s RefreshPreferences) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RefreshPreferences) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RefreshPreferences"} + if s.MaxHealthyPercentage != nil && *s.MaxHealthyPercentage < 100 { + invalidParams.Add(request.NewErrParamMinValue("MaxHealthyPercentage", 100)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetAlarmSpecification sets the AlarmSpecification field's value. func (s *RefreshPreferences) SetAlarmSpecification(v *AlarmSpecification) *RefreshPreferences { s.AlarmSpecification = v @@ -18940,6 +19277,12 @@ func (s *RefreshPreferences) SetInstanceWarmup(v int64) *RefreshPreferences { return s } +// SetMaxHealthyPercentage sets the MaxHealthyPercentage field's value. +func (s *RefreshPreferences) SetMaxHealthyPercentage(v int64) *RefreshPreferences { + s.MaxHealthyPercentage = &v + return s +} + // SetMinHealthyPercentage sets the MinHealthyPercentage field's value. func (s *RefreshPreferences) SetMinHealthyPercentage(v int64) *RefreshPreferences { s.MinHealthyPercentage = &v @@ -19999,10 +20342,11 @@ type StartInstanceRefreshInput struct { DesiredConfiguration *DesiredConfiguration `type:"structure"` // Sets your preferences for the instance refresh so that it performs as expected - // when you start it. Includes the instance warmup time, the minimum healthy - // percentage, and the behaviors that you want Amazon EC2 Auto Scaling to use - // if instances that are in Standby state or protected from scale in are found. - // You can also choose to enable additional features, such as the following: + // when you start it. Includes the instance warmup time, the minimum and maximum + // healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling + // to use if instances that are in Standby state or protected from scale in + // are found. You can also choose to enable additional features, such as the + // following: // // * Auto rollback // @@ -20049,6 +20393,11 @@ func (s *StartInstanceRefreshInput) Validate() error { invalidParams.AddNested("DesiredConfiguration", err.(request.ErrInvalidParams)) } } + if s.Preferences != nil { + if err := s.Preferences.Validate(); err != nil { + invalidParams.AddNested("Preferences", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -21155,6 +21504,11 @@ type UpdateAutoScalingGroupInput struct { // Only specify EC2 if you must clear a value that was previously set. HealthCheckType *string `min:"1" type:"string"` + // An instance maintenance policy. For more information, see Set instance maintenance + // policy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-maintenance-policy.html) + // in the Amazon EC2 Auto Scaling User Guide. + InstanceMaintenancePolicy *InstanceMaintenancePolicy `type:"structure"` + // The name of the launch configuration. If you specify LaunchConfigurationName // in your update request, you can't specify LaunchTemplate or MixedInstancesPolicy. 
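
Since StartInstanceRefreshInput.Validate now nests the RefreshPreferences validation above, a MaxHealthyPercentage under 100 fails client-side before any request is sent. A sketch of starting a refresh with both thresholds (the group name is a placeholder):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	client := autoscaling.New(sess)

	out, err := client.StartInstanceRefresh(&autoscaling.StartInstanceRefreshInput{
		AutoScalingGroupName: aws.String("my-asg"), // placeholder
		Preferences: &autoscaling.RefreshPreferences{
			MinHealthyPercentage: aws.Int64(90),
			MaxHealthyPercentage: aws.Int64(110), // replace up to ~20% at a time
		},
	})
	if err != nil {
		panic(err)
	}
	_ = out
}
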
LaunchConfigurationName *string `min:"1" type:"string"` @@ -21272,6 +21626,11 @@ func (s *UpdateAutoScalingGroupInput) Validate() error { if s.VPCZoneIdentifier != nil && len(*s.VPCZoneIdentifier) < 1 { invalidParams.Add(request.NewErrParamMinLen("VPCZoneIdentifier", 1)) } + if s.InstanceMaintenancePolicy != nil { + if err := s.InstanceMaintenancePolicy.Validate(); err != nil { + invalidParams.AddNested("InstanceMaintenancePolicy", err.(request.ErrInvalidParams)) + } + } if s.LaunchTemplate != nil { if err := s.LaunchTemplate.Validate(); err != nil { invalidParams.AddNested("LaunchTemplate", err.(request.ErrInvalidParams)) @@ -21349,6 +21708,12 @@ func (s *UpdateAutoScalingGroupInput) SetHealthCheckType(v string) *UpdateAutoSc return s } +// SetInstanceMaintenancePolicy sets the InstanceMaintenancePolicy field's value. +func (s *UpdateAutoScalingGroupInput) SetInstanceMaintenancePolicy(v *InstanceMaintenancePolicy) *UpdateAutoScalingGroupInput { + s.InstanceMaintenancePolicy = v + return s +} + // SetLaunchConfigurationName sets the LaunchConfigurationName field's value. func (s *UpdateAutoScalingGroupInput) SetLaunchConfigurationName(v string) *UpdateAutoScalingGroupInput { s.LaunchConfigurationName = &v diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go index d8813a8af4..7146f5ff74 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -272,6 +272,16 @@ func (c *ECR) BatchGetImageRequest(input *BatchGetImageInput) (req *request.Requ // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // +// - LimitExceededException +// The operation did not succeed because it would have exceeded a service limit +// for your account. For more information, see Amazon ECR service quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) +// in the Amazon Elastic Container Registry User Guide. +// +// - UnableToGetUpstreamImageException +// The image or images were unable to be pulled using the pull through cache +// rule. This is usually caused because of an issue with the Secrets Manager +// secret containing the credentials for the upstream registry. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage func (c *ECR) BatchGetImage(input *BatchGetImageInput) (*BatchGetImageOutput, error) { req, out := c.BatchGetImageRequest(input) @@ -544,8 +554,9 @@ func (c *ECR) CreatePullThroughCacheRuleRequest(input *CreatePullThroughCacheRul // CreatePullThroughCacheRule API operation for Amazon EC2 Container Registry. // // Creates a pull through cache rule. A pull through cache rule provides a way -// to cache images from an external public registry in your Amazon ECR private -// registry. +// to cache images from an upstream registry source in your Amazon ECR private +// registry. For more information, see Using pull through cache rules (https://docs.aws.amazon.com/AmazonECR/latest/userguide/pull-through-cache.html) +// in the Amazon Elastic Container Registry User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -578,6 +589,18 @@ func (c *ECR) CreatePullThroughCacheRuleRequest(input *CreatePullThroughCacheRul
 //     for your account. For more information, see Amazon ECR service quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html)
 //     in the Amazon Elastic Container Registry User Guide.
 //
+//   - UnableToAccessSecretException
+//     The secret is unable to be accessed. Verify the resource permissions for
+//     the secret and try again.
+//
+//   - SecretNotFoundException
+//     The ARN of the secret specified in the pull through cache rule was not found.
+//     Update the pull through cache rule with a valid secret ARN and try again.
+//
+//   - UnableToDecryptSecretValueException
+//     The secret is accessible but is unable to be decrypted. Verify the resource
+//     permissions and try again.
+//
 // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreatePullThroughCacheRule
 func (c *ECR) CreatePullThroughCacheRule(input *CreatePullThroughCacheRuleInput) (*CreatePullThroughCacheRuleOutput, error) {
 	req, out := c.CreatePullThroughCacheRuleRequest(input)
@@ -772,6 +795,9 @@ func (c *ECR) DeleteLifecyclePolicyRequest(input *DeleteLifecyclePolicyInput) (r
 //   - LifecyclePolicyNotFoundException
 //     The lifecycle policy could not be found, and no policy is set to the repository.
 //
+//   - ValidationException
+//     There was an exception validating this request.
+//
 // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy
 func (c *ECR) DeleteLifecyclePolicy(input *DeleteLifecyclePolicyInput) (*DeleteLifecyclePolicyOutput, error) {
 	req, out := c.DeleteLifecyclePolicyRequest(input)
@@ -1016,9 +1042,9 @@ func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *reques
 // DeleteRepository API operation for Amazon EC2 Container Registry.
 //
-// Deletes a repository. If the repository contains images, you must either
-// delete all images in the repository or use the force option to delete the
-// repository.
+// Deletes a repository. If the repository isn't empty, you must either delete
+// the contents of the repository or use the force option to delete the repository
+// and have Amazon ECR delete all of its contents on your behalf.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -2111,6 +2137,10 @@ func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput)
 //     The specified repository could not be found. Check the spelling of the specified
 //     repository and ensure that you are performing operations on the correct registry.
 //
+//   - UnableToGetUpstreamLayerException
+//     There was an issue getting the upstream layer matching the pull through cache
+//     rule.
+//
 // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer
 func (c *ECR) GetDownloadUrlForLayer(input *GetDownloadUrlForLayerInput) (*GetDownloadUrlForLayerOutput, error) {
 	req, out := c.GetDownloadUrlForLayerRequest(input)
@@ -2201,6 +2231,9 @@ func (c *ECR) GetLifecyclePolicyRequest(input *GetLifecyclePolicyInput) (req *re
 //   - LifecyclePolicyNotFoundException
 //     The lifecycle policy could not be found, and no policy is set to the repository.
 //
+//   - ValidationException
+//     There was an exception validating this request.
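
The new secret-related failures surface through exactly the runtime type assertion pattern these comments describe. A sketch of that pattern, assuming the ErrCode constants that aws-sdk-go generates alongside these exception types in the ecr package; the rule values are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	client := ecr.New(session.Must(session.NewSession()))

	_, err := client.CreatePullThroughCacheRule(&ecr.CreatePullThroughCacheRuleInput{
		EcrRepositoryPrefix: aws.String("docker-hub"),            // placeholder
		UpstreamRegistryUrl: aws.String("registry-1.docker.io"),
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case ecr.ErrCodeUnableToAccessSecretException,
			ecr.ErrCodeSecretNotFoundException,
			ecr.ErrCodeUnableToDecryptSecretValueException:
			// All three point at the Secrets Manager secret, not ECR itself.
			fmt.Println("fix the Secrets Manager secret:", aerr.Message())
		default:
			fmt.Println("request failed:", aerr)
		}
	}
}
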
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy func (c *ECR) GetLifecyclePolicy(input *GetLifecyclePolicyInput) (*GetLifecyclePolicyOutput, error) { req, out := c.GetLifecyclePolicyRequest(input) @@ -2298,6 +2331,9 @@ func (c *ECR) GetLifecyclePolicyPreviewRequest(input *GetLifecyclePolicyPreviewI // - LifecyclePolicyPreviewNotFoundException // There is no dry run for this repository. // +// - ValidationException +// There was an exception validating this request. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview func (c *ECR) GetLifecyclePolicyPreview(input *GetLifecyclePolicyPreviewInput) (*GetLifecyclePolicyPreviewOutput, error) { req, out := c.GetLifecyclePolicyPreviewRequest(input) @@ -3344,6 +3380,9 @@ func (c *ECR) PutLifecyclePolicyRequest(input *PutLifecyclePolicyInput) (req *re // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // +// - ValidationException +// There was an exception validating this request. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy func (c *ECR) PutLifecyclePolicy(input *PutLifecyclePolicyInput) (*PutLifecyclePolicyOutput, error) { req, out := c.PutLifecyclePolicyRequest(input) @@ -3906,6 +3945,9 @@ func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPrev // The previous lifecycle policy preview request has not completed. Wait and // try again. // +// - ValidationException +// There was an exception validating this request. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { req, out := c.StartLifecyclePolicyPreviewRequest(input) @@ -4123,6 +4165,108 @@ func (c *ECR) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInpu return out, req.Send() } +const opUpdatePullThroughCacheRule = "UpdatePullThroughCacheRule" + +// UpdatePullThroughCacheRuleRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePullThroughCacheRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdatePullThroughCacheRule for more information on using the UpdatePullThroughCacheRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdatePullThroughCacheRuleRequest method. 
+//	req, resp := client.UpdatePullThroughCacheRuleRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UpdatePullThroughCacheRule
+func (c *ECR) UpdatePullThroughCacheRuleRequest(input *UpdatePullThroughCacheRuleInput) (req *request.Request, output *UpdatePullThroughCacheRuleOutput) {
+	op := &request.Operation{
+		Name:       opUpdatePullThroughCacheRule,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdatePullThroughCacheRuleInput{}
+	}
+
+	output = &UpdatePullThroughCacheRuleOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdatePullThroughCacheRule API operation for Amazon EC2 Container Registry.
+//
+// Updates an existing pull through cache rule.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation UpdatePullThroughCacheRule for usage and error information.
+//
+// Returned Error Types:
+//
+//   - ServerException
+//     These errors are usually caused by a server-side issue.
+//
+//   - InvalidParameterException
+//     The specified parameter is invalid. Review the available parameters for the
+//     API request.
+//
+//   - ValidationException
+//     There was an exception validating this request.
+//
+//   - UnableToAccessSecretException
+//     The secret is unable to be accessed. Verify the resource permissions for
+//     the secret and try again.
+//
+//   - PullThroughCacheRuleNotFoundException
+//     The pull through cache rule was not found. Specify a valid pull through cache
+//     rule and try again.
+//
+//   - SecretNotFoundException
+//     The ARN of the secret specified in the pull through cache rule was not found.
+//     Update the pull through cache rule with a valid secret ARN and try again.
+//
+//   - UnableToDecryptSecretValueException
+//     The secret is accessible but is unable to be decrypted. Verify the resource
+//     permissions and try again.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UpdatePullThroughCacheRule
+func (c *ECR) UpdatePullThroughCacheRule(input *UpdatePullThroughCacheRuleInput) (*UpdatePullThroughCacheRuleOutput, error) {
+	req, out := c.UpdatePullThroughCacheRuleRequest(input)
+	return out, req.Send()
+}
+
+// UpdatePullThroughCacheRuleWithContext is the same as UpdatePullThroughCacheRule with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdatePullThroughCacheRule for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) UpdatePullThroughCacheRuleWithContext(ctx aws.Context, input *UpdatePullThroughCacheRuleInput, opts ...request.Option) (*UpdatePullThroughCacheRuleOutput, error) {
+	req, out := c.UpdatePullThroughCacheRuleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+ return out, req.Send() +} + const opUploadLayerPart = "UploadLayerPart" // UploadLayerPartRequest generates a "aws/request.Request" representing the @@ -4234,6 +4378,99 @@ func (c *ECR) UploadLayerPartWithContext(ctx aws.Context, input *UploadLayerPart return out, req.Send() } +const opValidatePullThroughCacheRule = "ValidatePullThroughCacheRule" + +// ValidatePullThroughCacheRuleRequest generates a "aws/request.Request" representing the +// client's request for the ValidatePullThroughCacheRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ValidatePullThroughCacheRule for more information on using the ValidatePullThroughCacheRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ValidatePullThroughCacheRuleRequest method. +// req, resp := client.ValidatePullThroughCacheRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ValidatePullThroughCacheRule +func (c *ECR) ValidatePullThroughCacheRuleRequest(input *ValidatePullThroughCacheRuleInput) (req *request.Request, output *ValidatePullThroughCacheRuleOutput) { + op := &request.Operation{ + Name: opValidatePullThroughCacheRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ValidatePullThroughCacheRuleInput{} + } + + output = &ValidatePullThroughCacheRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// ValidatePullThroughCacheRule API operation for Amazon EC2 Container Registry. +// +// Validates an existing pull through cache rule for an upstream registry that +// requires authentication. This will retrieve the contents of the Amazon Web +// Services Secrets Manager secret, verify the syntax, and then validate that +// authentication to the upstream registry is successful. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation ValidatePullThroughCacheRule for usage and error information. +// +// Returned Error Types: +// +// - ServerException +// These errors are usually caused by a server-side issue. +// +// - InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// - ValidationException +// There was an exception validating this request. +// +// - PullThroughCacheRuleNotFoundException +// The pull through cache rule was not found. Specify a valid pull through cache +// rule and try again. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ValidatePullThroughCacheRule +func (c *ECR) ValidatePullThroughCacheRule(input *ValidatePullThroughCacheRuleInput) (*ValidatePullThroughCacheRuleOutput, error) { + req, out := c.ValidatePullThroughCacheRuleRequest(input) + return out, req.Send() +} + +// ValidatePullThroughCacheRuleWithContext is the same as ValidatePullThroughCacheRule with the addition of +// the ability to pass a context and additional request options. +// +// See ValidatePullThroughCacheRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) ValidatePullThroughCacheRuleWithContext(ctx aws.Context, input *ValidatePullThroughCacheRuleInput, opts ...request.Option) (*ValidatePullThroughCacheRuleOutput, error) { + req, out := c.ValidatePullThroughCacheRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + // This data type is used in the ImageScanFinding data type. type Attribute struct { _ struct{} `type:"structure"` @@ -5060,6 +5297,10 @@ func (s *CompleteLayerUploadOutput) SetUploadId(v string) *CompleteLayerUploadOu type CreatePullThroughCacheRuleInput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager + // secret that identifies the credentials to authenticate to the upstream registry. + CredentialArn *string `locationName:"credentialArn" min:"50" type:"string"` + // The repository name prefix to use when caching images from the source registry. // // EcrRepositoryPrefix is a required field @@ -5070,8 +5311,24 @@ type CreatePullThroughCacheRuleInput struct { // registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` + // The name of the upstream registry. + UpstreamRegistry *string `locationName:"upstreamRegistry" type:"string" enum:"UpstreamRegistry"` + // The registry URL of the upstream public registry to use as the source for - // the pull through cache rule. + // the pull through cache rule. The following is the syntax to use for each + // supported upstream registry. + // + // * Amazon ECR Public (ecr-public) - public.ecr.aws + // + // * Docker Hub (docker-hub) - registry-1.docker.io + // + // * Quay (quay) - quay.io + // + // * Kubernetes (k8s) - registry.k8s.io + // + // * GitHub Container Registry (github-container-registry) - ghcr.io + // + // * Microsoft Azure Container Registry (azure-container-registry) - .azurecr.io // // UpstreamRegistryUrl is a required field UpstreamRegistryUrl *string `locationName:"upstreamRegistryUrl" type:"string" required:"true"` @@ -5098,6 +5355,9 @@ func (s CreatePullThroughCacheRuleInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
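
A usage sketch for the authenticated pull through cache flow: create a Docker Hub rule backed by a Secrets Manager secret, then use the new ValidatePullThroughCacheRule to confirm the stored credentials authenticate upstream. The secret ARN is a placeholder (per the ECR docs the secret name must begin with ecr-pullthroughcache/), the UpstreamRegistryDockerHub constant name follows the SDK's usual enum generation, and the IsValid output field is assumed from the API shape since the output struct is not shown in this diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	client := ecr.New(session.Must(session.NewSession()))

	_, err := client.CreatePullThroughCacheRule(&ecr.CreatePullThroughCacheRuleInput{
		EcrRepositoryPrefix: aws.String("docker-hub"),
		UpstreamRegistry:    aws.String(ecr.UpstreamRegistryDockerHub), // "docker-hub"
		UpstreamRegistryUrl: aws.String("registry-1.docker.io"),
		CredentialArn: aws.String( // placeholder ARN
			"arn:aws:secretsmanager:us-east-1:111122223333:secret:ecr-pullthroughcache/docker-hub-example"),
	})
	if err != nil {
		panic(err)
	}

	// Confirm the secret is readable, decryptable, and accepted upstream.
	out, err := client.ValidatePullThroughCacheRule(&ecr.ValidatePullThroughCacheRuleInput{
		EcrRepositoryPrefix: aws.String("docker-hub"),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", aws.BoolValue(out.IsValid))
}
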
func (s *CreatePullThroughCacheRuleInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreatePullThroughCacheRuleInput"} + if s.CredentialArn != nil && len(*s.CredentialArn) < 50 { + invalidParams.Add(request.NewErrParamMinLen("CredentialArn", 50)) + } if s.EcrRepositoryPrefix == nil { invalidParams.Add(request.NewErrParamRequired("EcrRepositoryPrefix")) } @@ -5114,6 +5374,12 @@ func (s *CreatePullThroughCacheRuleInput) Validate() error { return nil } +// SetCredentialArn sets the CredentialArn field's value. +func (s *CreatePullThroughCacheRuleInput) SetCredentialArn(v string) *CreatePullThroughCacheRuleInput { + s.CredentialArn = &v + return s +} + // SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value. func (s *CreatePullThroughCacheRuleInput) SetEcrRepositoryPrefix(v string) *CreatePullThroughCacheRuleInput { s.EcrRepositoryPrefix = &v @@ -5126,6 +5392,12 @@ func (s *CreatePullThroughCacheRuleInput) SetRegistryId(v string) *CreatePullThr return s } +// SetUpstreamRegistry sets the UpstreamRegistry field's value. +func (s *CreatePullThroughCacheRuleInput) SetUpstreamRegistry(v string) *CreatePullThroughCacheRuleInput { + s.UpstreamRegistry = &v + return s +} + // SetUpstreamRegistryUrl sets the UpstreamRegistryUrl field's value. func (s *CreatePullThroughCacheRuleInput) SetUpstreamRegistryUrl(v string) *CreatePullThroughCacheRuleInput { s.UpstreamRegistryUrl = &v @@ -5139,12 +5411,20 @@ type CreatePullThroughCacheRuleOutput struct { // rule was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager + // secret associated with the pull through cache rule. + CredentialArn *string `locationName:"credentialArn" min:"50" type:"string"` + // The Amazon ECR repository prefix associated with the pull through cache rule. EcrRepositoryPrefix *string `locationName:"ecrRepositoryPrefix" min:"2" type:"string"` // The registry ID associated with the request. RegistryId *string `locationName:"registryId" type:"string"` + // The name of the upstream registry associated with the pull through cache + // rule. + UpstreamRegistry *string `locationName:"upstreamRegistry" type:"string" enum:"UpstreamRegistry"` + // The upstream registry URL associated with the pull through cache rule. UpstreamRegistryUrl *string `locationName:"upstreamRegistryUrl" type:"string"` } @@ -5173,6 +5453,12 @@ func (s *CreatePullThroughCacheRuleOutput) SetCreatedAt(v time.Time) *CreatePull return s } +// SetCredentialArn sets the CredentialArn field's value. +func (s *CreatePullThroughCacheRuleOutput) SetCredentialArn(v string) *CreatePullThroughCacheRuleOutput { + s.CredentialArn = &v + return s +} + // SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value. func (s *CreatePullThroughCacheRuleOutput) SetEcrRepositoryPrefix(v string) *CreatePullThroughCacheRuleOutput { s.EcrRepositoryPrefix = &v @@ -5185,6 +5471,12 @@ func (s *CreatePullThroughCacheRuleOutput) SetRegistryId(v string) *CreatePullTh return s } +// SetUpstreamRegistry sets the UpstreamRegistry field's value. +func (s *CreatePullThroughCacheRuleOutput) SetUpstreamRegistry(v string) *CreatePullThroughCacheRuleOutput { + s.UpstreamRegistry = &v + return s +} + // SetUpstreamRegistryUrl sets the UpstreamRegistryUrl field's value. 
func (s *CreatePullThroughCacheRuleOutput) SetUpstreamRegistryUrl(v string) *CreatePullThroughCacheRuleOutput { s.UpstreamRegistryUrl = &v @@ -5217,6 +5509,9 @@ type CreateRepositoryInput struct { // on its own (such as nginx-web-app) or it can be prepended with a namespace // to group the repository into a category (such as project-a/nginx-web-app). // + // The repository name must start with a letter and can only contain lowercase + // letters, numbers, hyphens, underscores, and forward slashes. + // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` @@ -5259,6 +5554,16 @@ func (s *CreateRepositoryInput) Validate() error { invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -5687,6 +5992,10 @@ type DeletePullThroughCacheRuleOutput struct { // The timestamp associated with the pull through cache rule. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager + // secret associated with the pull through cache rule. + CredentialArn *string `locationName:"credentialArn" min:"50" type:"string"` + // The Amazon ECR repository prefix associated with the request. EcrRepositoryPrefix *string `locationName:"ecrRepositoryPrefix" min:"2" type:"string"` @@ -5721,6 +6030,12 @@ func (s *DeletePullThroughCacheRuleOutput) SetCreatedAt(v time.Time) *DeletePull return s } +// SetCredentialArn sets the CredentialArn field's value. +func (s *DeletePullThroughCacheRuleOutput) SetCredentialArn(v string) *DeletePullThroughCacheRuleOutput { + s.CredentialArn = &v + return s +} + // SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value. func (s *DeletePullThroughCacheRuleOutput) SetEcrRepositoryPrefix(v string) *DeletePullThroughCacheRuleOutput { s.EcrRepositoryPrefix = &v @@ -5804,7 +6119,8 @@ func (s *DeleteRegistryPolicyOutput) SetRegistryId(v string) *DeleteRegistryPoli type DeleteRepositoryInput struct { _ struct{} `type:"structure"` - // If a repository contains images, forces the deletion. + // If true, deleting the repository force deletes the contents of the repository. + // If false, the repository must be empty before attempting to delete it. Force *bool `locationName:"force" type:"boolean"` // The Amazon Web Services account ID associated with the registry that contains @@ -10445,6 +10761,10 @@ type PullThroughCacheRule struct { // The date and time the pull through cache was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + // The ARN of the Secrets Manager secret associated with the pull through cache + // rule. + CredentialArn *string `locationName:"credentialArn" min:"50" type:"string"` + // The Amazon ECR repository prefix associated with the pull through cache rule. EcrRepositoryPrefix *string `locationName:"ecrRepositoryPrefix" min:"2" type:"string"` @@ -10452,6 +10772,14 @@ type PullThroughCacheRule struct { // through cache rule is associated with. RegistryId *string `locationName:"registryId" type:"string"` + // The date and time, in JavaScript date format, when the pull through cache + // rule was last updated. 
+ UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp"` + + // The name of the upstream source registry associated with the pull through + // cache rule. + UpstreamRegistry *string `locationName:"upstreamRegistry" type:"string" enum:"UpstreamRegistry"` + // The upstream registry URL associated with the pull through cache rule. UpstreamRegistryUrl *string `locationName:"upstreamRegistryUrl" type:"string"` } @@ -10480,6 +10808,12 @@ func (s *PullThroughCacheRule) SetCreatedAt(v time.Time) *PullThroughCacheRule { return s } +// SetCredentialArn sets the CredentialArn field's value. +func (s *PullThroughCacheRule) SetCredentialArn(v string) *PullThroughCacheRule { + s.CredentialArn = &v + return s +} + // SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value. func (s *PullThroughCacheRule) SetEcrRepositoryPrefix(v string) *PullThroughCacheRule { s.EcrRepositoryPrefix = &v @@ -10492,6 +10826,18 @@ func (s *PullThroughCacheRule) SetRegistryId(v string) *PullThroughCacheRule { return s } +// SetUpdatedAt sets the UpdatedAt field's value. +func (s *PullThroughCacheRule) SetUpdatedAt(v time.Time) *PullThroughCacheRule { + s.UpdatedAt = &v + return s +} + +// SetUpstreamRegistry sets the UpstreamRegistry field's value. +func (s *PullThroughCacheRule) SetUpstreamRegistry(v string) *PullThroughCacheRule { + s.UpstreamRegistry = &v + return s +} + // SetUpstreamRegistryUrl sets the UpstreamRegistryUrl field's value. func (s *PullThroughCacheRule) SetUpstreamRegistryUrl(v string) *PullThroughCacheRule { s.UpstreamRegistryUrl = &v @@ -11644,7 +11990,8 @@ type RegistryScanningRule struct { // The frequency that scans are performed at for a private registry. When the // ENHANCED scan type is specified, the supported scan frequencies are CONTINUOUS_SCAN // and SCAN_ON_PUSH. When the BASIC scan type is specified, the SCAN_ON_PUSH - // and MANUAL scan frequencies are supported. + // scan frequency is supported. If scan on push is not specified, then the MANUAL + // scan frequency is set by default. // // ScanFrequency is a required field ScanFrequency *string `locationName:"scanFrequency" type:"string" required:"true" enum:"ScanFrequency"` @@ -11969,7 +12316,7 @@ type Repository struct { // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains // the arn:aws:ecr namespace, followed by the region of the repository, Amazon // Web Services account ID of the repository owner, repository namespace, and - // repository name. For example, arn:aws:ecr:region:012345678910:repository/test. + // repository name. For example, arn:aws:ecr:region:012345678910:repository-namespace/repository-name. RepositoryArn *string `locationName:"repositoryArn" type:"string"` // The name of the repository. @@ -12113,8 +12460,8 @@ func (s *RepositoryAlreadyExistsException) RequestID() string { // The filter settings used with image replication. Specifying a repository // filter to a replication rule provides a method for controlling which repositories -// in a private registry are replicated. If no repository filter is specified, -// all images in the repository are replicated. +// in a private registry are replicated. If no filters are added, the contents +// of all repositories are replicated. type RepositoryFilter struct { _ struct{} `type:"structure"` @@ -12753,12 +13100,12 @@ func (s *ScoreDetails) SetCvss(v *CvssScoreDetails) *ScoreDetails { return s } -// These errors are usually caused by a server-side issue. 
-type ServerException struct { +// The ARN of the secret specified in the pull through cache rule was not found. +// Update the pull through cache rule with a valid secret ARN and try again. +type SecretNotFoundException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` } @@ -12767,7 +13114,7 @@ type ServerException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ServerException) String() string { +func (s SecretNotFoundException) String() string { return awsutil.Prettify(s) } @@ -12776,23 +13123,23 @@ func (s ServerException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ServerException) GoString() string { +func (s SecretNotFoundException) GoString() string { return s.String() } -func newErrorServerException(v protocol.ResponseMetadata) error { - return &ServerException{ +func newErrorSecretNotFoundException(v protocol.ResponseMetadata) error { + return &SecretNotFoundException{ RespMetadata: v, } } // Code returns the exception type name. -func (s *ServerException) Code() string { - return "ServerException" +func (s *SecretNotFoundException) Code() string { + return "SecretNotFoundException" } // Message returns the exception's message. -func (s *ServerException) Message() string { +func (s *SecretNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12800,41 +13147,106 @@ func (s *ServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ServerException) OrigErr() error { +func (s *SecretNotFoundException) OrigErr() error { return nil } -func (s *ServerException) Error() string { +func (s *SecretNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s *ServerException) StatusCode() int { +func (s *SecretNotFoundException) StatusCode() int { return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s *ServerException) RequestID() string { +func (s *SecretNotFoundException) RequestID() string { return s.RespMetadata.RequestID } -type SetRepositoryPolicyInput struct { - _ struct{} `type:"structure"` - - // If the policy you are attempting to set on a repository policy would prevent - // you from setting another policy in the future, you must force the SetRepositoryPolicy - // operation. This is intended to prevent accidental repository lock outs. - Force *bool `locationName:"force" type:"boolean"` +// These errors are usually caused by a server-side issue. +type ServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The JSON repository policy text to apply to the repository. For more information, - // see Amazon ECR repository policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) - // in the Amazon Elastic Container Registry User Guide. 
- // - // PolicyText is a required field - PolicyText *string `locationName:"policyText" type:"string" required:"true"` + // The error message associated with the exception. + Message_ *string `locationName:"message" type:"string"` +} - // The Amazon Web Services account ID associated with the registry that contains - // the repository. If you do not specify a registry, the default registry is +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerException) GoString() string { + return s.String() +} + +func newErrorServerException(v protocol.ResponseMetadata) error { + return &ServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ServerException) Code() string { + return "ServerException" +} + +// Message returns the exception's message. +func (s *ServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ServerException) OrigErr() error { + return nil +} + +func (s *ServerException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +type SetRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // If the policy you are attempting to set on a repository policy would prevent + // you from setting another policy in the future, you must force the SetRepositoryPolicy + // operation. This is intended to prevent accidental repository lock outs. + Force *bool `locationName:"force" type:"boolean"` + + // The JSON repository policy text to apply to the repository. For more information, + // see Amazon ECR repository policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) + // in the Amazon Elastic Container Registry User Guide. + // + // PolicyText is a required field + PolicyText *string `locationName:"policyText" type:"string" required:"true"` + + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is // assumed. RegistryId *string `locationName:"registryId" type:"string"` @@ -13231,10 +13643,14 @@ type Tag struct { // One part of a key-value pair that make up a tag. A key is a general label // that acts like a category for more specific tag values. - Key *string `type:"string"` + // + // Key is a required field + Key *string `type:"string" required:"true"` // A value acts as a descriptor within a tag category (key). - Value *string `type:"string"` + // + // Value is a required field + Value *string `type:"string" required:"true"` } // String returns the string representation. 
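
Because Key and Value are now required, the Tag.Validate method added in the next hunk lets callers reject half-specified tags before a request leaves the client. A short sketch, assuming a configured *ecr.ECR client and the same imports as the earlier example; the repository ARN and tag values are placeholders.

func tagRepository(svc *ecr.ECR) error {
	tag := &ecr.Tag{
		Key:   aws.String("team"),
		Value: aws.String("platform"),
	}
	// Validate now fails fast when either Key or Value is nil, matching the
	// required markers added to the struct tags above.
	if err := tag.Validate(); err != nil {
		return err
	}
	_, err := svc.TagResource(&ecr.TagResourceInput{
		// Placeholder ARN for illustration only.
		ResourceArn: aws.String("arn:aws:ecr:us-east-1:111122223333:repository/my-repo"),
		Tags:        []*ecr.Tag{tag},
	})
	return err
}

TagResourceInput.Validate and CreateRepositoryInput.Validate pick up the same per-tag checks through the loops added below, so an invalid tag is reported with its index, for example Tags[0].
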
@@ -13255,6 +13671,22 @@ func (s Tag) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetKey sets the Key field's value. func (s *Tag) SetKey(v string) *Tag { s.Key = &v @@ -13311,6 +13743,16 @@ func (s *TagResourceInput) Validate() error { if s.Tags == nil { invalidParams.Add(request.NewErrParamRequired("Tags")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -13343,18 +13785,279 @@ func (s TagResourceOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceOutput) GoString() string { - return s.String() +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// The list of tags on the repository is over the limit. The maximum number +// of tags that can be applied to a repository is 50. +type TooManyTagsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TooManyTagsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TooManyTagsException) GoString() string { + return s.String() +} + +func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { + return &TooManyTagsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyTagsException) Code() string { + return "TooManyTagsException" +} + +// Message returns the exception's message. +func (s *TooManyTagsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyTagsException) OrigErr() error { + return nil +} + +func (s *TooManyTagsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *TooManyTagsException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *TooManyTagsException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The secret is unable to be accessed. Verify the resource permissions for
+// the secret and try again.
+type UnableToAccessSecretException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnableToAccessSecretException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnableToAccessSecretException) GoString() string {
+	return s.String()
+}
+
+func newErrorUnableToAccessSecretException(v protocol.ResponseMetadata) error {
+	return &UnableToAccessSecretException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *UnableToAccessSecretException) Code() string {
+	return "UnableToAccessSecretException"
+}
+
+// Message returns the exception's message.
+func (s *UnableToAccessSecretException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnableToAccessSecretException) OrigErr() error {
+	return nil
+}
+
+func (s *UnableToAccessSecretException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *UnableToAccessSecretException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnableToAccessSecretException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The secret is accessible but is unable to be decrypted. Verify the resource
+// permissions and try again.
+type UnableToDecryptSecretValueException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnableToDecryptSecretValueException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnableToDecryptSecretValueException) GoString() string {
+	return s.String()
+}
+
+func newErrorUnableToDecryptSecretValueException(v protocol.ResponseMetadata) error {
+	return &UnableToDecryptSecretValueException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *UnableToDecryptSecretValueException) Code() string {
+	return "UnableToDecryptSecretValueException"
+}
+
+// Message returns the exception's message.
+func (s *UnableToDecryptSecretValueException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnableToDecryptSecretValueException) OrigErr() error {
+	return nil
+}
+
+func (s *UnableToDecryptSecretValueException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *UnableToDecryptSecretValueException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnableToDecryptSecretValueException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The image or images were unable to be pulled using the pull through cache
+// rule. This is usually caused by an issue with the Secrets Manager secret
+// containing the credentials for the upstream registry.
+type UnableToGetUpstreamImageException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnableToGetUpstreamImageException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnableToGetUpstreamImageException) GoString() string {
+	return s.String()
+}
+
+func newErrorUnableToGetUpstreamImageException(v protocol.ResponseMetadata) error {
+	return &UnableToGetUpstreamImageException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *UnableToGetUpstreamImageException) Code() string {
+	return "UnableToGetUpstreamImageException"
+}
+
+// Message returns the exception's message.
+func (s *UnableToGetUpstreamImageException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnableToGetUpstreamImageException) OrigErr() error {
+	return nil
+}
+
+func (s *UnableToGetUpstreamImageException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

-// The list of tags on the repository is over the limit. The maximum number
-// of tags that can be applied to a repository is 50.
-type TooManyTagsException struct {
+// Status code returns the HTTP status code for the request's response error.
+func (s *UnableToGetUpstreamImageException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnableToGetUpstreamImageException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// There was an issue getting the upstream layer matching the pull through cache
+// rule.
+type UnableToGetUpstreamLayerException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -13366,7 +14069,7 @@ type TooManyTagsException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s TooManyTagsException) String() string { +func (s UnableToGetUpstreamLayerException) String() string { return awsutil.Prettify(s) } @@ -13375,23 +14078,23 @@ func (s TooManyTagsException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s TooManyTagsException) GoString() string { +func (s UnableToGetUpstreamLayerException) GoString() string { return s.String() } -func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { - return &TooManyTagsException{ +func newErrorUnableToGetUpstreamLayerException(v protocol.ResponseMetadata) error { + return &UnableToGetUpstreamLayerException{ RespMetadata: v, } } // Code returns the exception type name. -func (s *TooManyTagsException) Code() string { - return "TooManyTagsException" +func (s *UnableToGetUpstreamLayerException) Code() string { + return "UnableToGetUpstreamLayerException" } // Message returns the exception's message. -func (s *TooManyTagsException) Message() string { +func (s *UnableToGetUpstreamLayerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13399,21 +14102,21 @@ func (s *TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s *TooManyTagsException) OrigErr() error { +func (s *UnableToGetUpstreamLayerException) OrigErr() error { return nil } -func (s *TooManyTagsException) Error() string { +func (s *UnableToGetUpstreamLayerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s *TooManyTagsException) StatusCode() int { +func (s *UnableToGetUpstreamLayerException) StatusCode() int { return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s *TooManyTagsException) RequestID() string { +func (s *UnableToGetUpstreamLayerException) RequestID() string { return s.RespMetadata.RequestID } @@ -13628,6 +14331,144 @@ func (s UntagResourceOutput) GoString() string { return s.String() } +type UpdatePullThroughCacheRuleInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager + // secret that identifies the credentials to authenticate to the upstream registry. + // + // CredentialArn is a required field + CredentialArn *string `locationName:"credentialArn" min:"50" type:"string" required:"true"` + + // The repository name prefix to use when caching images from the source registry. + // + // EcrRepositoryPrefix is a required field + EcrRepositoryPrefix *string `locationName:"ecrRepositoryPrefix" min:"2" type:"string" required:"true"` + + // The Amazon Web Services account ID associated with the registry associated + // with the pull through cache rule. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` +} + +// String returns the string representation. 
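
With the secret-handling failure modes now modeled as concrete exception types, callers can branch on them directly. A hedged sketch; the err value is assumed to come from one of the pull through cache operations, and the same dispatch works against the ErrCode* constants added to errors.go later in this diff.

// classify maps the new pull-through-cache secret errors to short hints.
func classify(err error) string {
	switch e := err.(type) {
	case *ecr.SecretNotFoundException:
		return "secret ARN does not resolve: " + e.Message()
	case *ecr.UnableToAccessSecretException:
		return "secret exists but cannot be read: " + e.Message()
	case *ecr.UnableToDecryptSecretValueException:
		return "secret was read but could not be decrypted: " + e.Message()
	case *ecr.UnableToGetUpstreamImageException:
		return "upstream pull failed; check the stored credentials: " + e.Message()
	case *ecr.UnableToGetUpstreamLayerException:
		return "upstream layer fetch failed: " + e.Message()
	default:
		return err.Error()
	}
}
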
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdatePullThroughCacheRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdatePullThroughCacheRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdatePullThroughCacheRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdatePullThroughCacheRuleInput"} + if s.CredentialArn == nil { + invalidParams.Add(request.NewErrParamRequired("CredentialArn")) + } + if s.CredentialArn != nil && len(*s.CredentialArn) < 50 { + invalidParams.Add(request.NewErrParamMinLen("CredentialArn", 50)) + } + if s.EcrRepositoryPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("EcrRepositoryPrefix")) + } + if s.EcrRepositoryPrefix != nil && len(*s.EcrRepositoryPrefix) < 2 { + invalidParams.Add(request.NewErrParamMinLen("EcrRepositoryPrefix", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCredentialArn sets the CredentialArn field's value. +func (s *UpdatePullThroughCacheRuleInput) SetCredentialArn(v string) *UpdatePullThroughCacheRuleInput { + s.CredentialArn = &v + return s +} + +// SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value. +func (s *UpdatePullThroughCacheRuleInput) SetEcrRepositoryPrefix(v string) *UpdatePullThroughCacheRuleInput { + s.EcrRepositoryPrefix = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *UpdatePullThroughCacheRuleInput) SetRegistryId(v string) *UpdatePullThroughCacheRuleInput { + s.RegistryId = &v + return s +} + +type UpdatePullThroughCacheRuleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager + // secret associated with the pull through cache rule. + CredentialArn *string `locationName:"credentialArn" min:"50" type:"string"` + + // The Amazon ECR repository prefix associated with the pull through cache rule. + EcrRepositoryPrefix *string `locationName:"ecrRepositoryPrefix" min:"2" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The date and time, in JavaScript date format, when the pull through cache + // rule was updated. + UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdatePullThroughCacheRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UpdatePullThroughCacheRuleOutput) GoString() string { + return s.String() +} + +// SetCredentialArn sets the CredentialArn field's value. +func (s *UpdatePullThroughCacheRuleOutput) SetCredentialArn(v string) *UpdatePullThroughCacheRuleOutput { + s.CredentialArn = &v + return s +} + +// SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value. +func (s *UpdatePullThroughCacheRuleOutput) SetEcrRepositoryPrefix(v string) *UpdatePullThroughCacheRuleOutput { + s.EcrRepositoryPrefix = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *UpdatePullThroughCacheRuleOutput) SetRegistryId(v string) *UpdatePullThroughCacheRuleOutput { + s.RegistryId = &v + return s +} + +// SetUpdatedAt sets the UpdatedAt field's value. +func (s *UpdatePullThroughCacheRuleOutput) SetUpdatedAt(v time.Time) *UpdatePullThroughCacheRuleOutput { + s.UpdatedAt = &v + return s +} + type UploadLayerPartInput struct { _ struct{} `type:"structure"` @@ -13872,6 +14713,147 @@ func (s *UploadNotFoundException) RequestID() string { return s.RespMetadata.RequestID } +type ValidatePullThroughCacheRuleInput struct { + _ struct{} `type:"structure"` + + // The repository name prefix associated with the pull through cache rule. + // + // EcrRepositoryPrefix is a required field + EcrRepositoryPrefix *string `locationName:"ecrRepositoryPrefix" min:"2" type:"string" required:"true"` + + // The registry ID associated with the pull through cache rule. If you do not + // specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ValidatePullThroughCacheRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ValidatePullThroughCacheRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ValidatePullThroughCacheRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ValidatePullThroughCacheRuleInput"} + if s.EcrRepositoryPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("EcrRepositoryPrefix")) + } + if s.EcrRepositoryPrefix != nil && len(*s.EcrRepositoryPrefix) < 2 { + invalidParams.Add(request.NewErrParamMinLen("EcrRepositoryPrefix", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value. +func (s *ValidatePullThroughCacheRuleInput) SetEcrRepositoryPrefix(v string) *ValidatePullThroughCacheRuleInput { + s.EcrRepositoryPrefix = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *ValidatePullThroughCacheRuleInput) SetRegistryId(v string) *ValidatePullThroughCacheRuleInput { + s.RegistryId = &v + return s +} + +type ValidatePullThroughCacheRuleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager + // secret associated with the pull through cache rule. 
+ CredentialArn *string `locationName:"credentialArn" min:"50" type:"string"` + + // The Amazon ECR repository prefix associated with the pull through cache rule. + EcrRepositoryPrefix *string `locationName:"ecrRepositoryPrefix" min:"2" type:"string"` + + // The reason the validation failed. For more details about possible causes + // and how to address them, see Using pull through cache rules (https://docs.aws.amazon.com/AmazonECR/latest/userguide/pull-through-cache.html) + // in the Amazon Elastic Container Registry User Guide. + Failure *string `locationName:"failure" type:"string"` + + // Whether or not the pull through cache rule was validated. If true, Amazon + // ECR was able to reach the upstream registry and authentication was successful. + // If false, there was an issue and validation failed. The failure reason indicates + // the cause. + IsValid *bool `locationName:"isValid" type:"boolean"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The upstream registry URL associated with the pull through cache rule. + UpstreamRegistryUrl *string `locationName:"upstreamRegistryUrl" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ValidatePullThroughCacheRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ValidatePullThroughCacheRuleOutput) GoString() string { + return s.String() +} + +// SetCredentialArn sets the CredentialArn field's value. +func (s *ValidatePullThroughCacheRuleOutput) SetCredentialArn(v string) *ValidatePullThroughCacheRuleOutput { + s.CredentialArn = &v + return s +} + +// SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value. +func (s *ValidatePullThroughCacheRuleOutput) SetEcrRepositoryPrefix(v string) *ValidatePullThroughCacheRuleOutput { + s.EcrRepositoryPrefix = &v + return s +} + +// SetFailure sets the Failure field's value. +func (s *ValidatePullThroughCacheRuleOutput) SetFailure(v string) *ValidatePullThroughCacheRuleOutput { + s.Failure = &v + return s +} + +// SetIsValid sets the IsValid field's value. +func (s *ValidatePullThroughCacheRuleOutput) SetIsValid(v bool) *ValidatePullThroughCacheRuleOutput { + s.IsValid = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *ValidatePullThroughCacheRuleOutput) SetRegistryId(v string) *ValidatePullThroughCacheRuleOutput { + s.RegistryId = &v + return s +} + +// SetUpstreamRegistryUrl sets the UpstreamRegistryUrl field's value. +func (s *ValidatePullThroughCacheRuleOutput) SetUpstreamRegistryUrl(v string) *ValidatePullThroughCacheRuleOutput { + s.UpstreamRegistryUrl = &v + return s +} + // There was an exception validating this request. 
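
The update and validate operations pair naturally for credential rotation: point the rule at a new secret, then ask ECR to prove it can authenticate. A sketch under the assumption that the generated UpdatePullThroughCacheRule and ValidatePullThroughCacheRule client methods from earlier in this diff are available; svc, prefix, and newSecretArn are supplied by the caller, and the aws and fmt imports are as in the first example.

func rotateAndCheck(svc *ecr.ECR, prefix, newSecretArn string) error {
	// Point the existing rule at a new Secrets Manager secret.
	if _, err := svc.UpdatePullThroughCacheRule(&ecr.UpdatePullThroughCacheRuleInput{
		EcrRepositoryPrefix: aws.String(prefix),
		CredentialArn:       aws.String(newSecretArn),
	}); err != nil {
		return err
	}
	// Confirm ECR can read the secret and log in to the upstream registry.
	out, err := svc.ValidatePullThroughCacheRule(&ecr.ValidatePullThroughCacheRuleInput{
		EcrRepositoryPrefix: aws.String(prefix),
	})
	if err != nil {
		return err
	}
	if !aws.BoolValue(out.IsValid) {
		return fmt.Errorf("validation failed: %s", aws.StringValue(out.Failure))
	}
	return nil
}
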
type ValidationException struct { _ struct{} `type:"structure"` @@ -14112,6 +15094,15 @@ const ( // ImageFailureCodeKmsError is a ImageFailureCode enum value ImageFailureCodeKmsError = "KmsError" + + // ImageFailureCodeUpstreamAccessDenied is a ImageFailureCode enum value + ImageFailureCodeUpstreamAccessDenied = "UpstreamAccessDenied" + + // ImageFailureCodeUpstreamTooManyRequests is a ImageFailureCode enum value + ImageFailureCodeUpstreamTooManyRequests = "UpstreamTooManyRequests" + + // ImageFailureCodeUpstreamUnavailable is a ImageFailureCode enum value + ImageFailureCodeUpstreamUnavailable = "UpstreamUnavailable" ) // ImageFailureCode_Values returns all elements of the ImageFailureCode enum @@ -14124,6 +15115,9 @@ func ImageFailureCode_Values() []string { ImageFailureCodeMissingDigestAndTag, ImageFailureCodeImageReferencedByManifestList, ImageFailureCodeKmsError, + ImageFailureCodeUpstreamAccessDenied, + ImageFailureCodeUpstreamTooManyRequests, + ImageFailureCodeUpstreamUnavailable, } } @@ -14350,3 +15344,35 @@ func TagStatus_Values() []string { TagStatusAny, } } + +const ( + // UpstreamRegistryEcrPublic is a UpstreamRegistry enum value + UpstreamRegistryEcrPublic = "ecr-public" + + // UpstreamRegistryQuay is a UpstreamRegistry enum value + UpstreamRegistryQuay = "quay" + + // UpstreamRegistryK8s is a UpstreamRegistry enum value + UpstreamRegistryK8s = "k8s" + + // UpstreamRegistryDockerHub is a UpstreamRegistry enum value + UpstreamRegistryDockerHub = "docker-hub" + + // UpstreamRegistryGithubContainerRegistry is a UpstreamRegistry enum value + UpstreamRegistryGithubContainerRegistry = "github-container-registry" + + // UpstreamRegistryAzureContainerRegistry is a UpstreamRegistry enum value + UpstreamRegistryAzureContainerRegistry = "azure-container-registry" +) + +// UpstreamRegistry_Values returns all elements of the UpstreamRegistry enum +func UpstreamRegistry_Values() []string { + return []string{ + UpstreamRegistryEcrPublic, + UpstreamRegistryQuay, + UpstreamRegistryK8s, + UpstreamRegistryDockerHub, + UpstreamRegistryGithubContainerRegistry, + UpstreamRegistryAzureContainerRegistry, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go index a8392ade8f..4e2bed930f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go @@ -189,6 +189,13 @@ const ( // enabled on the repository and try again. ErrCodeScanNotFoundException = "ScanNotFoundException" + // ErrCodeSecretNotFoundException for service response error code + // "SecretNotFoundException". + // + // The ARN of the secret specified in the pull through cache rule was not found. + // Update the pull through cache rule with a valid secret ARN and try again. + ErrCodeSecretNotFoundException = "SecretNotFoundException" + // ErrCodeServerException for service response error code // "ServerException". // @@ -202,6 +209,35 @@ const ( // of tags that can be applied to a repository is 50. ErrCodeTooManyTagsException = "TooManyTagsException" + // ErrCodeUnableToAccessSecretException for service response error code + // "UnableToAccessSecretException". + // + // The secret is unable to be accessed. Verify the resource permissions for + // the secret and try again. 
+	ErrCodeUnableToAccessSecretException = "UnableToAccessSecretException"
+
+	// ErrCodeUnableToDecryptSecretValueException for service response error code
+	// "UnableToDecryptSecretValueException".
+	//
+	// The secret is accessible but is unable to be decrypted. Verify the resource
+	// permissions and try again.
+	ErrCodeUnableToDecryptSecretValueException = "UnableToDecryptSecretValueException"
+
+	// ErrCodeUnableToGetUpstreamImageException for service response error code
+	// "UnableToGetUpstreamImageException".
+	//
+	// The image or images were unable to be pulled using the pull through cache
+	// rule. This is usually caused by an issue with the Secrets Manager secret
+	// containing the credentials for the upstream registry.
+	ErrCodeUnableToGetUpstreamImageException = "UnableToGetUpstreamImageException"
+
+	// ErrCodeUnableToGetUpstreamLayerException for service response error code
+	// "UnableToGetUpstreamLayerException".
+	//
+	// There was an issue getting the upstream layer matching the pull through cache
+	// rule.
+	ErrCodeUnableToGetUpstreamLayerException = "UnableToGetUpstreamLayerException"
+
	// ErrCodeUnsupportedImageTypeException for service response error code
	// "UnsupportedImageTypeException".
	//
@@ -256,8 +292,13 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
	"RepositoryNotFoundException":          newErrorRepositoryNotFoundException,
	"RepositoryPolicyNotFoundException":    newErrorRepositoryPolicyNotFoundException,
	"ScanNotFoundException":                newErrorScanNotFoundException,
+	"SecretNotFoundException":              newErrorSecretNotFoundException,
	"ServerException":                      newErrorServerException,
	"TooManyTagsException":                 newErrorTooManyTagsException,
+	"UnableToAccessSecretException":        newErrorUnableToAccessSecretException,
+	"UnableToDecryptSecretValueException":  newErrorUnableToDecryptSecretValueException,
+	"UnableToGetUpstreamImageException":    newErrorUnableToGetUpstreamImageException,
+	"UnableToGetUpstreamLayerException":    newErrorUnableToGetUpstreamLayerException,
	"UnsupportedImageTypeException":        newErrorUnsupportedImageTypeException,
	"UnsupportedUpstreamRegistryException": newErrorUnsupportedUpstreamRegistryException,
	"UploadNotFoundException":              newErrorUploadNotFoundException,
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
index 2882d45568..01ec8099e4 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -67,19 +67,47 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req

// AbortMultipartUpload API operation for Amazon Simple Storage Service.
//
-// This action aborts a multipart upload. After a multipart upload is aborted,
+// This operation aborts a multipart upload. After a multipart upload is aborted,
// no additional parts can be uploaded using that upload ID. The storage consumed
// by any previously uploaded parts will be freed. However, if any part uploads
// are currently in progress, those part uploads might or might not succeed.
// As a result, it might be necessary to abort a given multipart upload multiple
// times in order to completely free all storage consumed by all parts.
//
-// To verify that all parts have been removed, so you don't get charged for
+// To verify that all parts have been removed and prevent getting charged for
// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-// action and ensure that the parts list is empty.
+// API operation and ensure that the parts list is empty.
//
-// For information about permissions required to use the multipart upload, see
-// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - For information about permissions
+// required to use the multipart upload, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI or SDKs
+// create the session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to AbortMultipartUpload:
//
@@ -173,60 +201,93 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput)
//
// You first initiate the multipart upload and then upload all parts using the
// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
// operation. After successfully uploading all relevant parts of an upload,
-// you call this action to complete the upload. Upon receiving this request,
-// Amazon S3 concatenates all the parts in ascending order by part number to
-// create a new object. In the Complete Multipart Upload request, you must provide
-// the parts list. You must ensure that the parts list is complete. This action
-// concatenates the parts that you provide in the list. For each part in the
-// list, you must provide the part number and the ETag value, returned after
-// that part was uploaded.
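
The contract described above, every part listed in ascending order with the PartNumber and ETag returned when that part was uploaded, translates to a small helper. A hedged sketch; svc is an assumed *s3.S3 client, and bucket, key, uploadID, and the collected ETags would come from earlier CreateMultipartUpload and UploadPart calls.

func completeUpload(svc *s3.S3, bucket, key, uploadID string, etags []string) error {
	parts := make([]*s3.CompletedPart, 0, len(etags))
	for i, etag := range etags {
		parts = append(parts, &s3.CompletedPart{
			ETag:       aws.String(etag),
			PartNumber: aws.Int64(int64(i + 1)), // part numbers are 1-based and must ascend
		})
	}
	_, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		UploadId:        aws.String(uploadID),
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
	})
	return err
}

Since a 200 OK response can still carry an embedded error, as the surrounding text notes, callers not relying on an SDK retryer should be prepared to retry this call.
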
-// -// Processing of a Complete Multipart Upload request could take several minutes -// to complete. After Amazon S3 begins processing the request, it sends an HTTP +// you call this CompleteMultipartUpload operation to complete the upload. Upon +// receiving this request, Amazon S3 concatenates all the parts in ascending +// order by part number to create a new object. In the CompleteMultipartUpload +// request, you must provide the parts list and ensure that the parts list is +// complete. The CompleteMultipartUpload API operation concatenates the parts +// that you provide in the list. For each part in the list, you must provide +// the PartNumber value and the ETag value that are returned after that part +// was uploaded. +// +// The processing of a CompleteMultipartUpload request could take several minutes +// to finalize. After Amazon S3 begins processing the request, it sends an HTTP // response header that specifies a 200 OK response. While processing is in // progress, Amazon S3 periodically sends white space characters to keep the // connection from timing out. A request could fail after the initial 200 OK // response has been sent. This means that a 200 OK response can contain either -// a success or an error. If you call the S3 API directly, make sure to design -// your application to parse the contents of the response and handle it appropriately. +// a success or an error. The error response might be embedded in the 200 OK +// response. If you call this API operation directly, make sure to design your +// application to parse the contents of the response and handle it appropriately. // If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs // detect the embedded error and apply error handling per your configuration // settings (including automatically retrying the request as appropriate). If -// the condition persists, the SDKs throws an exception (or, for the SDKs that -// don't use exceptions, they return the error). +// the condition persists, the SDKs throw an exception (or, for the SDKs that +// don't use exceptions, they return an error). // // Note that if CompleteMultipartUpload fails, applications should be prepared // to retry the failed requests. For more information, see Amazon S3 Error Best // Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). // -// You cannot use Content-Type: application/x-www-form-urlencoded with Complete -// Multipart Upload requests. Also, if you do not provide a Content-Type header, -// CompleteMultipartUpload returns a 200 OK response. +// You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload +// requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload +// can still return a 200 OK response. // // For more information about multipart uploads, see Uploading Objects Using -// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. // -// For information about permissions required to use the multipart upload API, -// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. 
These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - For information about permissions
+// required to use the multipart upload API, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
//
-// CompleteMultipartUpload has the following special errors:
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI or SDKs
+// create the session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
//
-// - Error code: EntityTooSmall Description: Your proposed upload is smaller
+// Special errors
+//
+// - Error Code: EntityTooSmall Description: Your proposed upload is smaller
// than the minimum allowed object size. Each part must be at least 5 MB
-// in size, except the last part. 400 Bad Request
+// in size, except the last part. HTTP Status Code: 400 Bad Request
//
-// - Error code: InvalidPart Description: One or more of the specified parts
+// - Error Code: InvalidPart Description: One or more of the specified parts
// could not be found. The part might not have been uploaded, or the specified
-// entity tag might not have matched the part's entity tag. 400 Bad Request
+// ETag might not have matched the uploaded part's ETag. HTTP Status Code:
+// 400 Bad Request
//
-// - Error code: InvalidPartOrder Description: The list of parts was not
+// - Error Code: InvalidPartOrder Description: The list of parts was not
// in ascending order. The parts list must be specified in order by part
-// number. 400 Bad Request
+// number. HTTP Status Code: 400 Bad Request
//
-// - Error code: NoSuchUpload Description: The specified multipart upload
+// - Error Code: NoSuchUpload Description: The specified multipart upload
// does not exist. The upload ID might be invalid, or the multipart upload
-// might have been aborted or completed. 404 Not Found
+// might have been aborted or completed. HTTP Status Code: 404 Not Found
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to CompleteMultipartUpload:
//
@@ -319,184 +380,108 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou
// upload Upload Part - Copy (UploadPartCopy) API.
For more information, see // Copy Object Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). // -// All copy requests must be authenticated. Additionally, you must have read -// access to the source object and write access to the destination bucket. For -// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). -// Both the Region that you want to copy the object from and the Region that -// you want to copy the object to must be enabled for your account. +// You can copy individual objects between general purpose buckets, between +// directory buckets, and between general purpose buckets and directory buckets. // -// A copy request might return an error when Amazon S3 receives the copy request -// or while Amazon S3 is copying the files. If the error occurs before the copy -// action starts, you receive a standard Amazon S3 error. If the error occurs -// during the copy operation, the error response is embedded in the 200 OK response. -// This means that a 200 OK response can contain either a success or an error. -// If you call the S3 API directly, make sure to design your application to -// parse the contents of the response and handle it appropriately. If you use -// Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the -// embedded error and apply error handling per your configuration settings (including -// automatically retrying the request as appropriate). If the condition persists, -// the SDKs throws an exception (or, for the SDKs that don't use exceptions, -// they return the error). -// -// If the copy is successful, you receive a response with information about -// the copied object. -// -// If the request is an HTTP 1.1 request, the response is chunk encoded. If -// it were not, it would not contain the content-length, and you would need -// to read the entire body. +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// The copy request charge is based on the storage class and Region that you -// specify for the destination object. The request can also result in a data -// retrieval charge for the source if the source storage class bills for data -// retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/). +// Both the Region that you want to copy the object from and the Region that +// you want to copy the object to must be enabled for your account. // // Amazon S3 transfer acceleration does not support cross-Region copies. If // you request a cross-Region copy using a transfer acceleration endpoint, you // get a 400 Bad Request error. For more information, see Transfer Acceleration // (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). // -// # Metadata -// -// When copying an object, you can preserve all metadata (the default) or specify -// new metadata. However, the access control list (ACL) is not preserved and -// is set to private for the user making the request. 
To override the default -// ACL setting, specify a new ACL when generating a copy request. For more information, -// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). -// -// To specify whether you want the object metadata copied from the source object -// or replaced with metadata provided in the request, you can optionally add -// the x-amz-metadata-directive header. When you grant permissions, you can -// use the s3:x-amz-metadata-directive condition key to enforce certain metadata -// behavior when objects are uploaded. For more information, see Specifying -// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) -// in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition -// keys, see Actions, Resources, and Condition Keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). -// -// x-amz-website-redirect-location is unique to each object and must be specified -// in the request headers to copy the value. -// -// x-amz-copy-source-if Headers -// -// To only copy an object under certain conditions, such as whether the Etag -// matches or whether the object was modified before or after a specified date, -// use the following request parameters: -// -// - x-amz-copy-source-if-match -// -// - x-amz-copy-source-if-none-match +// # Authentication and authorization // -// - x-amz-copy-source-if-unmodified-since -// -// - x-amz-copy-source-if-modified-since -// -// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since -// headers are present in the request and evaluate as follows, Amazon S3 returns -// 200 OK and copies the data: -// -// - x-amz-copy-source-if-match condition evaluates to true -// -// - x-amz-copy-source-if-unmodified-since condition evaluates to false -// -// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since -// headers are present in the request and evaluate as follows, Amazon S3 returns -// the 412 Precondition Failed response code: -// -// - x-amz-copy-source-if-none-match condition evaluates to false -// -// - x-amz-copy-source-if-modified-since condition evaluates to true -// -// All headers with the x-amz- prefix, including x-amz-copy-source, must be -// signed. -// -// # Server-side encryption -// -// Amazon S3 automatically encrypts all new objects that are copied to an S3 -// bucket. When copying an object, if you don't specify encryption information -// in your copy request, the encryption setting of the target object is set -// to the default encryption configuration of the destination bucket. By default, -// all buckets have a base level of encryption configuration that uses server-side -// encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket -// has a default encryption configuration that uses server-side encryption with -// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption -// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with -// customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding -// KMS key, or a customer-provided key to encrypt the target object copy. -// -// When you perform a CopyObject operation, if you want to use a different type -// of encryption setting for the target object, you can use other appropriate -// encryption-related headers to encrypt the target object with a KMS key, an -// Amazon S3 managed key, or a customer-provided key. 
With server-side encryption,
-// Amazon S3 encrypts your data as it writes your data to disks in its data
-// centers and decrypts the data when you access it. If the encryption setting
-// in your request is different from the default encryption configuration of
-// the destination bucket, the encryption setting in your request takes precedence.
-// If the source object for the copy is stored in Amazon S3 using SSE-C, you
-// must provide the necessary encryption information in your request so that
-// Amazon S3 can decrypt the object for copying. For more information about
-// server-side encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
-//
-// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the
-// object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
-// in the Amazon S3 User Guide.
-//
-// # Access Control List (ACL)-Specific Request Headers
-//
-// When copying an object, you can optionally use headers to grant ACL-based
-// permissions. By default, all objects are private. Only the owner has full
-// access control. When adding a new object, you can grant permissions to individual
-// Amazon Web Services accounts or to predefined groups that are defined by
-// Amazon S3. These permissions are then added to the ACL on the object. For
-// more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
-// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
-//
-// If the bucket that you're copying objects to uses the bucket owner enforced
-// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions.
-// Buckets that use this setting only accept PUT requests that don't specify
-// an ACL or PUT requests that specify bucket owner full control ACLs, such
-// as the bucket-owner-full-control canned ACL or an equivalent form of this
-// ACL expressed in the XML format.
-//
-// For more information, see Controlling ownership of objects and disabling
-// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
-// in the Amazon S3 User Guide.
-//
-// If your bucket uses the bucket owner enforced setting for Object Ownership,
-// all objects written to the bucket by any account will be owned by the bucket
-// owner.
+// All CopyObject requests must be authenticated and signed by using IAM credentials
+// (access key ID and secret access key for the IAM identities). All headers
+// with the x-amz- prefix, including x-amz-copy-source, must be signed. For
+// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
//
-// # Checksums
+// Directory buckets - You must use the IAM credentials to authenticate and
+// authorize your access to the CopyObject API operation, instead of using the
+// temporary security credentials through the CreateSession API operation.
//
-// When copying an object, if it has a checksum, that checksum will be copied
-// to the new object by default. When you copy the object over, you can optionally
-// specify a different checksum algorithm to use with the x-amz-checksum-algorithm
-// header.
+// The Amazon Web Services CLI and SDKs handle authentication and authorization
+// on your behalf.
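
For illustration, a minimal sketch of a CopyObject call with this SDK (the
bucket and key names are placeholders, the source object is assumed to exist,
and error handling is abbreviated):

	package main

	import (
		"fmt"
		"log"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func main() {
		// Credentials and Region come from the environment or shared config.
		sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
		svc := s3.New(sess)

		// CopySource is "source-bucket/source-key"; URL-encode keys that
		// contain special characters.
		out, err := svc.CopyObject(&s3.CopyObjectInput{
			Bucket:     aws.String("destination-bucket"), // placeholder
			Key:        aws.String("copied-key"),         // placeholder
			CopySource: aws.String("source-bucket/source-key"),
		})
		// A 200 OK can still carry an embedded error; the SDK detects it and
		// surfaces it here as err, per the guidance above.
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(aws.StringValue(out.CopyObjectResult.ETag))
	}
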
//
-// # Storage Class Options
+// # Permissions
//
-// You can use the CopyObject action to change the storage class of an object
-// that is already stored in Amazon S3 by using the StorageClass parameter.
-// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
-// in the Amazon S3 User Guide.
+// You must have read access to the source object and write access to the destination
+// bucket.
//
-// If the source object's storage class is GLACIER, you must restore a copy
-// of this object before you can use it as a source object for the copy operation.
-// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
-// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html).
+// - General purpose bucket permissions - You must have permissions in an
+// IAM policy based on the source and destination bucket types in a CopyObject
+// operation. If the source object is in a general purpose bucket, you must
+// have s3:GetObject permission to read the source object that is being copied.
+// If the destination bucket is a general purpose bucket, you must have s3:PutObject
+// permission to write the object copy to the destination bucket.
+//
+// - Directory bucket permissions - You must have permissions in a bucket
+// policy or an IAM identity-based policy based on the source and destination
+// bucket types in a CopyObject operation. If the source object that you
+// want to copy is in a directory bucket, you must have the s3express:CreateSession
+// permission in the Action element of a policy to read the object. By default,
+// the session is in the ReadWrite mode. If you want to restrict the access,
+// you can explicitly set the s3express:SessionMode condition key to ReadOnly
+// on the copy source bucket. If the copy destination is a directory bucket,
+// you must have the s3express:CreateSession permission in the Action element
+// of a policy to write the object to the destination. The s3express:SessionMode
+// condition key can't be set to ReadOnly on the copy destination bucket.
+// For example policies, see Example bucket policies for S3 Express One Zone
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
+// and Amazon Web Services Identity and Access Management (IAM) identity-based
+// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
+// in the Amazon S3 User Guide.
//
-// # Versioning
+// # Response and special errors
+//
+// When the request is an HTTP 1.1 request, the response is chunk encoded. When
+// the request is not an HTTP 1.1 request, the response would not contain the
+// Content-Length. You always need to read the entire response body to check
+// if the copy succeeds. While the copy is in progress, Amazon S3 might periodically
+// send white space characters to keep the connection alive while it copies the data.
+//
+// - If the copy is successful, you receive a response with information about
+// the copied object.
+//
+// - A copy request might return an error when Amazon S3 receives the copy
+// request or while Amazon S3 is copying the files. A 200 OK response can
+// contain either a success or an error. If the error occurs before the copy
+// action starts, you receive a standard Amazon S3 error. If the error occurs
+// during the copy operation, the error response is embedded in the 200 OK
+// response.
For example, in a cross-region copy, you may encounter throttling
+// and receive a 200 OK response. For more information, see Resolve the Error
+// 200 response when copying objects to Amazon S3 (repost.aws/knowledge-center/s3-resolve-200-internalerror).
+// The 200 OK status code means the copy was accepted, but it doesn't mean
+// the copy is complete. Another example is when you disconnect from Amazon
+// S3 before the copy is complete; in that case, Amazon S3 might cancel the copy,
+// and you may still receive a 200 OK response. You must stay connected to Amazon
+// S3 until the entire response is successfully received and processed. If you call
+// this API operation directly, make sure to design your application to parse
+// the content of the response and handle it appropriately. If you use Amazon
+// Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded
+// error and apply error handling per your configuration settings (including
+// automatically retrying the request as appropriate). If the condition persists,
+// the SDKs throw an exception (or, for the SDKs that
+// don't use exceptions, they return an error).
+//
+// # Charge
//
-// By default, x-amz-copy-source header identifies the current version of an
-// object to copy. If the current version is a delete marker, Amazon S3 behaves
-// as if the object was deleted. To copy a different version, use the versionId
-// subresource.
+// The copy request charge is based on the storage class and Region that you
+// specify for the destination object. The request can also result in a data
+// retrieval charge for the source if the source storage class bills for data
+// retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/).
//
-// If you enable versioning on the target bucket, Amazon S3 generates a unique
-// version ID for the object being copied. This version ID is different from
-// the version ID of the source object. Amazon S3 returns the version ID of
-// the copied object in the x-amz-version-id response header in the response.
+// # HTTP Host header syntax
//
-// If you do not enable versioning or suspend it on the target bucket, the version
-// ID that Amazon S3 generates is always null.
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to CopyObject:
//
@@ -581,77 +566,89 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request
// CreateBucket API operation for Amazon Simple Storage Service.
//
-// Creates a new S3 bucket. To create a bucket, you must register with Amazon
-// S3 and have a valid Amazon Web Services Access Key ID to authenticate requests.
+// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts
+// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html).
+//
+// Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and
+// have a valid Amazon Web Services Access Key ID to authenticate requests.
// Anonymous requests are never allowed to create buckets. By creating the bucket,
// you become the bucket owner.
//
-// Not every string is an acceptable bucket name. For information about bucket
-// naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
-// -// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). -// -// By default, the bucket is created in the US East (N. Virginia) Region. You -// can optionally specify a Region in the request body. You might choose a Region -// to optimize latency, minimize costs, or address regulatory requirements. -// For example, if you reside in Europe, you will probably find it advantageous -// to create buckets in the Europe (Ireland) Region. For more information, see -// Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). -// -// If you send your create bucket request to the s3.amazonaws.com endpoint, -// the request goes to the us-east-1 Region. Accordingly, the signature calculations -// in Signature Version 4 must use us-east-1 as the Region, even if the location -// constraint in the request specifies another Region where the bucket is to -// be created. If you create a bucket in a Region other than US East (N. Virginia), -// your application must be able to handle 307 redirect. For more information, -// see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). +// There are two types of buckets: general purpose buckets and directory buckets. +// For more information about these bucket types, see Creating, configuring, +// and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) +// in the Amazon S3 User Guide. // -// # Permissions +// - General purpose buckets - If you send your CreateBucket request to the +// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. +// So the signature calculations in Signature Version 4 must use us-east-1 +// as the Region, even if the location constraint in the request specifies +// another Region where the bucket is to be created. If you create a bucket +// in a Region other than US East (N. Virginia), your application must be +// able to handle 307 redirect. For more information, see Virtual hosting +// of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html) +// in the Amazon S3 User Guide. // -// In addition to s3:CreateBucket, the following permissions are required when -// your CreateBucket request includes specific headers: -// -// - Access control lists (ACLs) - If your CreateBucket request specifies -// access control list (ACL) permissions and the ACL is public-read, public-read-write, -// authenticated-read, or if you specify access permissions explicitly through -// any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are -// needed. If the ACL for the CreateBucket request is private or if the request -// doesn't specify any ACLs, only s3:CreateBucket permission is needed. -// -// - Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket -// request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning -// permissions are required. -// -// - S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership -// header, then the s3:PutBucketOwnershipControls permission is required. -// By default, ObjectOwnership is set to BucketOWnerEnforced and ACLs are -// disabled. We recommend keeping ACLs disabled, except in uncommon use cases -// where you must control access for each object individually. 
If you want
-// to change the ObjectOwnership setting, you can use the x-amz-object-ownership
-// header in your CreateBucket request to set the ObjectOwnership setting
-// of your choice. For more information about S3 Object Ownership, see Controlling
-// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+// - Directory buckets - For directory buckets, you must make requests for
+// this API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
+// . Virtual-hosted-style requests aren't supported. For more information,
+// see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide.
//
-// - S3 Block Public Access - If your specific use case requires granting
-// public access to your S3 resources, you can disable Block Public Access.
-// You can create a new bucket with Block Public Access enabled, then separately
-// call the DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
+// Permissions
+//
+// - General purpose bucket permissions - In addition to the s3:CreateBucket
+// permission, the following permissions are required in a policy when your
+// CreateBucket request includes specific headers: Access control lists (ACLs)
+// - In your CreateBucket request, if you specify an access control list
+// (ACL) and set it to public-read, public-read-write, authenticated-read,
+// or if you explicitly specify any other custom ACLs, both s3:CreateBucket
+// and s3:PutBucketAcl permissions are required. In your CreateBucket request,
+// if you set the ACL to private, or if you don't specify any ACLs, only
+// the s3:CreateBucket permission is required. Object Lock - In your CreateBucket
+// request, if you set x-amz-bucket-object-lock-enabled to true, the s3:PutBucketObjectLockConfiguration
+// and s3:PutBucketVersioning permissions are required. S3 Object Ownership
+// - If your CreateBucket request includes the x-amz-object-ownership header,
+// then the s3:PutBucketOwnershipControls permission is required. If your
+// CreateBucket request sets BucketOwnerEnforced for Amazon S3 Object Ownership
+// and specifies a bucket ACL that provides access to an external Amazon
+// Web Services account, your request fails with a 400 error and returns
+// the InvalidBucketAclWithObjectOwnership error code. For more information,
+// see Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html)
+// in the Amazon S3 User Guide. S3 Block Public Access - If your specific
+// use case requires granting public access to your S3 resources, you can
+// disable Block Public Access. Specifically, you can create a new bucket
+// with Block Public Access enabled, then separately call the DeletePublicAccessBlock
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock
-// permission. By default, all Block Public Access settings are enabled for
-// new buckets. To avoid inadvertent exposure of your resources, we recommend
-// keeping the S3 Block Public Access settings enabled.
For more information -// about S3 Block Public Access, see Blocking public access to your Amazon -// S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// permission. For more information about S3 Block Public Access, see Blocking +// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) // in the Amazon S3 User Guide. // -// If your CreateBucket request sets BucketOwnerEnforced for Amazon S3 Object -// Ownership and specifies a bucket ACL that provides access to an external -// Amazon Web Services account, your request fails with a 400 error and returns -// the InvalidBucketAcLWithObjectOwnership error code. For more information, -// see Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html) -// in the Amazon S3 User Guide. +// - Directory bucket permissions - You must have the s3express:CreateBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation +// can only be performed by the Amazon Web Services account that owns the +// resource. For more information about directory bucket policies and permissions, +// see Amazon Web Services Identity and Access Management (IAM) for S3 Express +// One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3 +// Object Ownership, and S3 Block Public Access are not supported for directory +// buckets. For directory buckets, all Block Public Access settings are enabled +// at the bucket level and S3 Object Ownership is set to Bucket owner enforced +// (ACLs disabled). These settings can't be modified. For more information +// about permissions for creating and working with directory buckets, see +// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. For more information about supported S3 features +// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. // // The following operations are related to CreateBucket: // @@ -749,164 +746,139 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // You specify this upload ID in each of your subsequent upload part requests // (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). // You also include this upload ID in the final request to either complete or -// abort the multipart upload request. +// abort the multipart upload request. For more information about multipart +// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) +// in the Amazon S3 User Guide. // -// For more information about multipart uploads, see Multipart Upload Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or +// abort the multipart upload. 
Amazon S3 frees up the space used to store the +// parts and stops charging you for storing them only after you either complete +// or abort a multipart upload. // // If you have configured a lifecycle rule to abort incomplete multipart uploads, -// the upload must complete within the number of days specified in the bucket -// lifecycle configuration. Otherwise, the incomplete multipart upload becomes -// eligible for an abort action and Amazon S3 aborts the multipart upload. For -// more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// the created multipart upload must be completed within the number of days +// specified in the bucket lifecycle configuration. Otherwise, the incomplete +// multipart upload becomes eligible for an abort action and Amazon S3 aborts +// the multipart upload. For more information, see Aborting Incomplete Multipart +// Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// +// - Directory buckets - S3 Lifecycle is not supported by directory buckets. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// For information about the permissions required to use the multipart upload -// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// # Request signing // // For request signing, multipart upload is just a series of regular requests. // You initiate a multipart upload, send one or more requests to upload parts, // and then complete the multipart upload process. You sign each request individually. // There is nothing special about signing multipart upload requests. For more // information about signing, see Authenticating Requests (Amazon Web Services -// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). -// -// After you initiate a multipart upload and upload one or more parts, to stop -// being charged for storing the uploaded parts, you must either complete or -// abort the multipart upload. Amazon S3 frees up the space used to store the -// parts and stop charging you for storing them only after you either complete -// or abort a multipart upload. -// -// Server-side encryption is for data encryption at rest. Amazon S3 encrypts -// your data as it writes it to disks in its data centers and decrypts it when -// you access it. Amazon S3 automatically encrypts all new objects that are -// uploaded to an S3 bucket. When doing a multipart upload, if you don't specify -// encryption information in your request, the encryption setting of the uploaded -// parts is set to the default encryption configuration of the destination bucket. -// By default, all buckets have a base level of encryption configuration that -// uses server-side encryption with Amazon S3 managed keys (SSE-S3). 
If the -// destination bucket has a default encryption configuration that uses server-side -// encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided -// encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided -// key to encrypt the uploaded parts. When you perform a CreateMultipartUpload -// operation, if you want to use a different type of encryption setting for -// the uploaded parts, you can request that Amazon S3 encrypts the object with -// a KMS key, an Amazon S3 managed key, or a customer-provided key. If the encryption -// setting in your request is different from the default encryption configuration -// of the destination bucket, the encryption setting in your request takes precedence. -// If you choose to provide your own encryption key, the request headers you -// provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// requests must match the headers you used in the request to initiate the upload -// by using CreateMultipartUpload. You can request that Amazon S3 save the uploaded -// parts encrypted with server-side encryption with an Amazon S3 managed key -// (SSE-S3), an Key Management Service (KMS) key (SSE-KMS), or a customer-provided -// encryption key (SSE-C). -// -// To perform a multipart upload with encryption by using an Amazon Web Services -// KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* -// actions on the key. These permissions are required because Amazon S3 must -// decrypt and read data from the encrypted file parts before it completes the -// multipart upload. For more information, see Multipart upload API and permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// and Protecting data using server-side encryption with Amazon Web Services -// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) // in the Amazon S3 User Guide. // -// If your Identity and Access Management (IAM) user or role is in the same -// Amazon Web Services account as the KMS key, then you must have these permissions -// on the key policy. If your IAM user or role belongs to a different account -// than the key, then you must have the permissions on both the key policy and -// your IAM user or role. -// -// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). -// -// # Access Permissions -// -// When copying an object, you can optionally specify the accounts or groups -// that should be granted specific permissions on the new object. There are -// two ways to grant the permissions using the request headers: -// -// - Specify a canned ACL with the x-amz-acl request header. For more information, -// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// - Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, -// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters -// map to the set of permissions that Amazon S3 supports in an ACL. For more -// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). 
-//
-// You can use either a canned ACL or specify access permissions explicitly.
-// You cannot do both.
-//
-// # Server-Side- Encryption-Specific Request Headers
-//
-// Amazon S3 encrypts data by using server-side encryption with an Amazon S3
-// managed key (SSE-S3) by default. Server-side encryption is for data encryption
-// at rest. Amazon S3 encrypts your data as it writes it to disks in its data
-// centers and decrypts it when you access it. You can request that Amazon S3
-// encrypts data at rest by using server-side encryption with other key options.
-// The option you use depends on whether you want to use KMS keys (SSE-KMS)
-// or provide your own encryption keys (SSE-C).
+// Permissions
+//
+// - General purpose bucket permissions - For information about the permissions
+// required to use the multipart upload API, see Multipart upload and permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide. To perform a multipart upload with encryption
+// by using an Amazon Web Services KMS key, the requester must have permission
+// to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These
+// permissions are required because Amazon S3 must decrypt and read data
+// from the encrypted file parts before it completes the multipart upload.
+// For more information, see Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions)
+// and Protecting data using server-side encryption with Amazon Web Services
+// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
+// in the Amazon S3 User Guide.
//
-// - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// Encryption
+//
+// - General purpose buckets - Server-side encryption is for data encryption
+// at rest. Amazon S3 encrypts your data as it writes it to disks in its
+// data centers and decrypts it when you access it. Amazon S3 automatically
+// encrypts all new objects that are uploaded to an S3 bucket. When doing
+// a multipart upload, if you don't specify encryption information in your
+// request, the encryption setting of the uploaded parts is set to the default
+// encryption configuration of the destination bucket. By default, all buckets
+// have a base level of encryption configuration that uses server-side encryption
+// with Amazon S3 managed keys (SSE-S3).
If the destination bucket has a
+// default encryption configuration that uses server-side encryption with
+// a Key Management Service (KMS) key (SSE-KMS), or a customer-provided
+// encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a
+// customer-provided key to encrypt the uploaded parts. When you perform
+// a CreateMultipartUpload operation, if you want to use a different type
+// of encryption setting for the uploaded parts, you can request that Amazon
+// S3 encrypts the object with a different encryption key (such as an Amazon
+// S3 managed key, a KMS key, or a customer-provided key). When the encryption
+// setting in your request is different from the default encryption configuration
+// of the destination bucket, the encryption setting in your request takes
+// precedence. If you choose to provide your own encryption key, the request
+// headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// requests must match the headers you used in the CreateMultipartUpload
+// request. Use KMS keys (SSE-KMS) that include the Amazon Web Services managed
// key (aws/s3) and KMS customer managed keys stored in Key Management Service
// (KMS) – If you want Amazon Web Services to manage the keys used to encrypt
// data, specify the following headers in the request. x-amz-server-side-encryption
// x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context
// If you specify x-amz-server-side-encryption:aws:kms, but don't provide
// x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon
-// With this operation, you can grant access permissions using one of the following -// two methods: -// -// - Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined -// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees -// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// - Specify access permissions explicitly — To explicitly grant access -// permissions to specific Amazon Web Services accounts or groups, use the -// following headers. Each header maps to specific permissions that Amazon -// S3 supports in an ACL. For more information, see Access Control List (ACL) -// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// In the header, you specify a list of grantees who get the specific permission. -// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write -// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You -// specify each grantee as a type=value pair, where the type is one of the -// following: id – if the value specified is the canonical user ID of an -// Amazon Web Services account uri – if you are granting permissions to -// a predefined group emailAddress – if the value specified is the email -// address of an Amazon Web Services account Using email addresses to specify -// a grantee is only supported in the following Amazon Web Services Regions: -// US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific -// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) -// South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-read header grants the Amazon Web Services accounts identified -// by account IDs permissions to read object data and its metadata: x-amz-grant-read: -// id="11112222333", id="444455556666" +// Web Services managed key (aws/s3 key) in KMS to protect the data. To perform +// a multipart upload with encryption by using an Amazon Web Services KMS +// key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* +// actions on the key. These permissions are required because Amazon S3 must +// decrypt and read data from the encrypted file parts before it completes +// the multipart upload. For more information, see Multipart upload API and +// permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services +// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. If your Identity and Access Management (IAM) +// user or role is in the same Amazon Web Services account as the KMS key, +// then you must have these permissions on the key policy. If your IAM user +// or role is in a different account from the key, then you must have the +// permissions on both the key policy and your IAM user or role. All GET +// and PUT requests for an object protected by KMS fail if you don't make +// them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), +// or Signature Version 4. 
For information about configuring any of the officially
+// supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying
+// the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+// in the Amazon S3 User Guide. For more information about server-side encryption
+// with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption
+// with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
+// in the Amazon S3 User Guide. Use customer-provided encryption keys (SSE-C)
+// – If you want to manage your own encryption keys, provide all the following
+// headers in the request. x-amz-server-side-encryption-customer-algorithm
+// x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5
+// For more information about server-side encryption with customer-provided
+// encryption keys (SSE-C), see Protecting data using server-side encryption
+// with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory buckets - For directory buckets, only server-side encryption
+// with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to CreateMultipartUpload:
//
@@ -948,6 +920,152 @@ func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMult
 	return out, req.Send()
 }
 
+const opCreateSession = "CreateSession"
+
+// CreateSessionRequest generates a "aws/request.Request" representing the
+// client's request for the CreateSession operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateSession for more information on using the CreateSession
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the CreateSessionRequest method.
+// req, resp := client.CreateSessionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSession
+func (c *S3) CreateSessionRequest(input *CreateSessionInput) (req *request.Request, output *CreateSessionOutput) {
+	op := &request.Operation{
+		Name:       opCreateSession,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?session",
+	}
+
+	if input == nil {
+		input = &CreateSessionInput{}
+	}
+
+	output = &CreateSessionOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateSession API operation for Amazon Simple Storage Service.
+//
+// Creates a session that establishes temporary security credentials to support
+// fast authentication and authorization for the Zonal endpoint APIs on directory
+// buckets.
For more information about Zonal endpoint APIs that include the
+// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html)
+// in the Amazon S3 User Guide.
+//
+// To make Zonal endpoint API requests on a directory bucket, use the CreateSession
+// API operation. Specifically, you grant s3express:CreateSession permission
+// to a bucket in a bucket policy or an IAM identity-based policy. Then, you
+// use IAM credentials to make the CreateSession API request on the bucket,
+// which returns temporary security credentials that include the access key
+// ID, secret access key, session token, and expiration. These credentials have
+// associated permissions to access the Zonal endpoint APIs. After the session
+// is created, you don’t need to use other policies to grant permissions to
+// each Zonal endpoint API individually. Instead, in your Zonal endpoint API
+// requests, you sign your requests by applying the temporary security credentials
+// of the session to the request headers and following the SigV4 protocol for
+// authentication. You also apply the session token to the x-amz-s3session-token
+// request header for authorization. Temporary security credentials are scoped
+// to the bucket and expire after 5 minutes. After the expiration time, any
+// calls that you make with those credentials will fail. You must use IAM credentials
+// again to make a CreateSession API request that generates a new set of temporary
+// credentials for use. Temporary credentials cannot be extended or refreshed
+// beyond the original specified interval.
+//
+// If you use Amazon Web Services SDKs, the SDKs handle session token refreshes
+// automatically to avoid service interruptions when a session expires. We recommend
+// that you use the Amazon Web Services SDKs to initiate and manage requests
+// to the CreateSession API. For more information, see Performance guidelines
+// and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication)
+// in the Amazon S3 User Guide.
+//
+// - You must make requests for this API operation to the Zonal endpoint.
+// These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com.
+// Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject
+// API operation doesn't use the temporary security credentials returned
+// from the CreateSession API operation for authentication and authorization.
+// For information about authentication and authorization of the CopyObject
+// API operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html).
+//
+// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket
+// API operation doesn't use the temporary security credentials returned
+// from the CreateSession API operation for authentication and authorization.
+// For information about authentication and authorization of the HeadBucket
+// API operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html).
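
For illustration, a minimal sketch of calling CreateSession directly with this
SDK (the directory bucket name is a placeholder; svc is an *s3.S3 client built
as in the CopyObject sketch earlier, and the Credentials field shape is assumed
from the CreateSessionOutput type added in this change):

	// Request temporary credentials for the Zonal endpoint API calls.
	out, err := svc.CreateSession(&s3.CreateSessionInput{
		Bucket: aws.String("my-bucket--usw2-az1--x-s3"), // placeholder directory bucket name
	})
	if err != nil {
		log.Fatal(err)
	}
	// The credentials expire after about 5 minutes; make another CreateSession
	// call to obtain a fresh set (the SDKs do this refresh automatically).
	creds := out.Credentials
	fmt.Println(aws.StringValue(creds.AccessKeyId), aws.TimeValue(creds.Expiration))
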
+// +// # Permissions +// +// To obtain temporary security credentials, you must create a bucket policy +// or an IAM identity-based policy that grants s3express:CreateSession permission +// to the bucket. In a policy, you can have the s3express:SessionMode condition +// key to control who can create a ReadWrite or ReadOnly session. For more information +// about ReadWrite or ReadOnly sessions, see x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters). +// For example policies, see Example bucket policies for S3 Express One Zone +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// To grant cross-account access to Zonal endpoint APIs, the bucket policy should +// also grant both accounts the s3express:CreateSession permission. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateSession for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSession +func (c *S3) CreateSession(input *CreateSessionInput) (*CreateSessionOutput, error) { + req, out := c.CreateSessionRequest(input) + return out, req.Send() +} + +// CreateSessionWithContext is the same as CreateSession with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateSessionWithContext(ctx aws.Context, input *CreateSessionInput, opts ...request.Option) (*CreateSessionOutput, error) { + req, out := c.CreateSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteBucket = "DeleteBucket" // DeleteBucketRequest generates a "aws/request.Request" representing the @@ -995,6 +1113,35 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request // Deletes the S3 bucket. All objects (including all object versions and delete // markers) in the bucket must be deleted before the bucket itself can be deleted. // +// - Directory buckets - If multipart uploads in a directory bucket are in +// progress, you can't delete the bucket until all the in-progress multipart +// uploads are aborted or completed. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Regional endpoint. 
These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, +// see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - You must have the s3:DeleteBucket +// permission on the specified bucket in a policy. +// +// - Directory bucket permissions - You must have the s3express:DeleteBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation +// can only be performed by the Amazon Web Services account that owns the +// resource. For more information about directory bucket policies and permissions, +// see Amazon Web Services Identity and Access Management (IAM) for S3 Express +// One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// // The following operations are related to DeleteBucket: // // - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) @@ -1073,6 +1220,8 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Deletes an analytics configuration for the bucket (specified by the analytics // configuration ID). // @@ -1165,6 +1314,8 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // DeleteBucketCors API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Deletes the cors configuration information set for the bucket. // // To use this operation, you must have permission to perform the s3:PutBucketCORS @@ -1252,6 +1403,8 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( // DeleteBucketEncryption API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // This implementation of the DELETE action resets the default encryption for // the bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). // For information about the bucket default encryption feature, see Amazon S3 @@ -1343,6 +1496,8 @@ func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBuc // DeleteBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Deletes the S3 Intelligent-Tiering configuration from the specified bucket. // // The S3 Intelligent-Tiering storage class is designed to optimize storage @@ -1442,6 +1597,8 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Deletes an inventory configuration (identified by the inventory ID) from // the bucket. 
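
For illustration, the bucket-level delete operations above share a common
calling pattern; a minimal sketch using DeleteBucket, with awserr (from
github.com/aws/aws-sdk-go/aws/awserr) to inspect the service error code (the
bucket name is a placeholder; svc is an *s3.S3 client as in the earlier
sketches):

	_, err := svc.DeleteBucket(&s3.DeleteBucketInput{
		Bucket: aws.String("my-bucket"), // placeholder; the bucket must be empty
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchBucket {
		// The bucket is already gone; decide whether to treat this as success.
		log.Printf("bucket does not exist: %s", aerr.Message())
	} else if err != nil {
		log.Fatal(err)
	}
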
// @@ -1534,6 +1691,8 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re // DeleteBucketLifecycle API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Deletes the lifecycle configuration from the specified bucket. Amazon S3 // removes all the lifecycle configuration rules in the lifecycle subresource // associated with the bucket. Your objects never expire, and Amazon S3 no longer @@ -1628,6 +1787,8 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC // DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Deletes a metrics configuration for the Amazon CloudWatch request metrics // (specified by the metrics configuration ID) from the bucket. Note that this // doesn't include the daily storage metrics. @@ -1723,6 +1884,8 @@ func (c *S3) DeleteBucketOwnershipControlsRequest(input *DeleteBucketOwnershipCo // DeleteBucketOwnershipControls API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Removes OwnershipControls for an Amazon S3 bucket. To use this operation, // you must have the s3:PutBucketOwnershipControls permission. For more information // about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). @@ -1808,11 +1971,21 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // DeleteBucketPolicy API operation for Amazon Simple Storage Service. // -// This implementation of the DELETE action uses the policy subresource to delete -// the policy of a specified bucket. If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must have the DeleteBucketPolicy permissions on the specified -// bucket and belong to the bucket owner's account to use this operation. +// Deletes the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// If you are using an identity other than the root user of the Amazon Web Services +// account that owns the bucket, the calling identity must both have the DeleteBucketPolicy +// permissions on the specified bucket and belong to the bucket owner's account +// in order to use this operation. // // If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 // Access Denied error. If you have the correct permissions, but you're not @@ -1827,8 +2000,23 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // these API actions by VPC endpoint policies and Amazon Web Services Organizations // policies. // -// For more information about bucket policies, see Using Bucket Policies and -// UserPolicies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). 
+// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission
+// is required in a policy. For more information about general purpose
+// bucket policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation,
+// you must have the s3express:DeleteBucketPolicy permission in an IAM identity-based
+// policy instead of a bucket policy. Cross-account access to this API operation
+// isn't supported. This operation can only be performed by the Amazon Web
+// Services account that owns the resource. For more information about directory
+// bucket policies and permissions, see Amazon Web Services Identity and
+// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
+// in the Amazon S3 User Guide.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
 //
 // The following operations are related to DeleteBucketPolicy
 //
@@ -1908,6 +2096,8 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
 // DeleteBucketReplication API operation for Amazon Simple Storage Service.
 //
+// This operation is not supported by directory buckets.
+//
 // Deletes the replication configuration from the bucket.
 //
 // To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
@@ -2000,6 +2190,8 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r
 // DeleteBucketTagging API operation for Amazon Simple Storage Service.
 //
+// This operation is not supported by directory buckets.
+//
 // Deletes the tags from the bucket.
 //
 // To use this operation, you must have permission to perform the s3:PutBucketTagging
@@ -2084,6 +2276,8 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r
 // DeleteBucketWebsite API operation for Amazon Simple Storage Service.
 //
+// This operation is not supported by directory buckets.
+//
 // This action removes the website configuration for a bucket. Amazon S3 returns
 // a 200 OK response upon successfully deleting a website configuration on the
 // specified bucket. You will get a 200 OK response if the website configuration
@@ -2176,31 +2370,79 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request
 // DeleteObject API operation for Amazon Simple Storage Service.
 //
-// Removes the null version (if there is one) of an object and inserts a delete
-// marker, which becomes the latest version of the object. If there isn't a
-// null version, Amazon S3 does not remove any objects but will still respond
-// that the command was successful.
+// Removes an object from a bucket. The behavior depends on the bucket's versioning
+// state:
 //
-// To remove a specific version, you must use the version Id subresource. Using
-// this subresource permanently deletes the version. If the object deleted is
-// a delete marker, Amazon S3 sets the response header, x-amz-delete-marker,
+// - If versioning is enabled, the operation removes the null version (if
+// there is one) of an object and inserts a delete marker, which becomes
+// the latest version of the object. If there isn't a null version, Amazon
+// S3 does not remove any objects but will still respond that the command
+// was successful.
+//
+// - If versioning is suspended or not enabled, the operation permanently
+// deletes the object.
+//
+// - Directory buckets - S3 Versioning isn't enabled and supported for directory
+// buckets. For this API operation, only the null value of the version ID
+// is supported by directory buckets. You can only specify null to the versionId
+// query parameter in the request.
+//
+// - Directory buckets - For directory buckets, you must make requests for
+// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// To remove a specific version, you must use the versionId query parameter.
+// Using this query parameter permanently deletes the version. If the object
+// deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker
 // to true.
 //
 // If the object you want to delete is in a bucket where the bucket versioning
 // configuration is MFA Delete enabled, you must include the x-amz-mfa request
 // header in the DELETE versionId request. Requests that include x-amz-mfa must
-// use HTTPS.
+// use HTTPS. For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html)
+// in the Amazon S3 User Guide. To see sample requests that use versioning,
+// see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
 //
-// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html).
-// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
+// Directory buckets - MFA delete is not supported by directory buckets.
 //
-// You can delete objects by explicitly calling DELETE Object or configure its
-// lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html))
+// You can delete objects by explicitly calling DELETE Object or by calling PutBucketLifecycle
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
 // to enable Amazon S3 to remove them for you. If you want to block users or
 // accounts from removing or deleting objects from your bucket, you must deny
 // them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration
 // actions.
 //
+// Directory buckets - S3 Lifecycle is not supported by directory buckets.
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your DeleteObject request includes specific headers.
+// s3:DeleteObject - To delete an object from a bucket, you must always have
+// the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific
+// version of an object from a versioning-enabled bucket, you must have the
+// s3:DeleteObjectVersion permission.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
+//
 // The following action is related to DeleteObject:
 //
 // - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
@@ -2276,6 +2518,8 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r
 // DeleteObjectTagging API operation for Amazon Simple Storage Service.
 //
+// This operation is not supported by directory buckets.
+//
 // Removes the entire tag set from the specified object. For more information
 // about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
 //
@@ -2367,36 +2611,82 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque
 // DeleteObjects API operation for Amazon Simple Storage Service.
 //
-// This action enables you to delete multiple objects from a bucket using a
-// single HTTP request. If you know the object keys that you want to delete,
-// then this action provides a suitable alternative to sending individual delete
-// requests, reducing per-request overhead.
+// This operation enables you to delete multiple objects from a bucket using
+// a single HTTP request. If you know the object keys that you want to delete,
+// then this operation provides a suitable alternative to sending individual
+// delete requests, reducing per-request overhead.
 //
-// The request contains a list of up to 1000 keys that you want to delete. In
-// the XML, you provide the object key names, and optionally, version IDs if
-// you want to delete a specific version of the object from a versioning-enabled
-// bucket. For each key, Amazon S3 performs a delete action and returns the
-// result of that delete, success, or failure, in the response. Note that if
+// The request can contain a list of up to 1000 keys that you want to delete.
+// In the XML, you provide the object key names, and optionally, version IDs
+// if you want to delete a specific version of the object from a versioning-enabled
+// bucket. For each key, Amazon S3 performs a delete operation and returns the
+// result of that delete, success or failure, in the response. Note that if
 // the object specified in the request is not found, Amazon S3 returns the result
 // as deleted.
 //
-// The action supports two modes for the response: verbose and quiet. By default,
-// the action uses verbose mode in which the response includes the result of
-// deletion of each key in your request. In quiet mode the response includes
-// only keys where the delete action encountered an error. For a successful
-// deletion, the action does not return any information about the delete in
-// the response body.
+// - Directory buckets - S3 Versioning isn't enabled and supported for directory
+// buckets.
+//
+// - Directory buckets - For directory buckets, you must make requests for
+// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// The operation supports two modes for the response: verbose and quiet. By
+// default, the operation uses verbose mode in which the response includes the
+// result of deletion of each key in your request. In quiet mode the response
+// includes only keys where the delete operation encountered an error. For a
+// successful deletion in quiet mode, the operation does not return any information
+// about the delete in the response body.
 //
 // When performing this action on an MFA Delete enabled bucket, that attempts
 // to delete any versioned objects, you must include an MFA token. If you do
 // not provide one, the entire request will fail, even if there are non-versioned
 // objects you are trying to delete. If you provide an invalid token, whether
 // there are versioned keys in the request or not, the entire Multi-Object Delete
-// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete).
+// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete)
+// in the Amazon S3 User Guide.
+//
+// Directory buckets - MFA delete is not supported by directory buckets.
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your DeleteObjects request includes specific headers.
+// s3:DeleteObject - To delete an object from a bucket, you must always specify
+// the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific
+// version of an object from a versioning-enabled bucket, you must specify
+// the s3:DeleteObjectVersion permission.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// Content-MD5 request header
+//
+// - General purpose bucket - The Content-MD5 request header is required
+// for all Multi-Object Delete requests. Amazon S3 uses the header value
+// to ensure that your request body has not been altered in transit.
+//
+// - Directory bucket - The Content-MD5 request header or an additional checksum
+// request header (including x-amz-checksum-crc32, x-amz-checksum-crc32c,
+// x-amz-checksum-sha1, or x-amz-checksum-sha256) is required for all Multi-Object
+// Delete requests.
+//
+// # HTTP Host header syntax
 //
-// Finally, the Content-MD5 header is required for all Multi-Object Delete requests.
-// Amazon S3 uses the header value to ensure that your request body has not
-// been altered in transit.
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
 //
 // The following operations are related to DeleteObjects:
 //
@@ -2482,6 +2772,8 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput)
 // DeletePublicAccessBlock API operation for Amazon Simple Storage Service.
 //
+// This operation is not supported by directory buckets.
+//
 // Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use
 // this operation, you must have the s3:PutBucketPublicAccessBlock permission.
 // For more information about permissions, see Permissions Related to Bucket
@@ -2569,6 +2861,8 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
 // GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
 //
+// This operation is not supported by directory buckets.
+//
 // This implementation of the GET action uses the accelerate subresource to
 // return the Transfer Acceleration state of a bucket, which is either Enabled
 // or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that
@@ -2668,16 +2962,18 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request
 // GetBucketAcl API operation for Amazon Simple Storage Service.
 //
+// This operation is not supported by directory buckets.
+//
 // This implementation of the GET action uses the acl subresource to return
 // the access control list (ACL) of a bucket. To use GET to return the ACL of
-// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission
-// is granted to the anonymous user, you can return the ACL of the bucket without
-// using an authorization header.
+// the bucket, you must have READ_ACP access to the bucket. If READ_ACP
+// permission is granted to the anonymous user, you can return the ACL of the
+// bucket without using an authorization header.
 //
-// To use this API operation against an access point, provide the alias of the
-// access point in place of the bucket name.
+// When you use this API operation with an access point, provide the alias of
+// the access point in place of the bucket name.
 //
-// To use this API operation against an Object Lambda access point, provide
+// When you use this API operation with an Object Lambda access point, provide
 // the alias of the Object Lambda access point in place of the bucket name.
 // If the Object Lambda access point alias in a request is not valid, the error
 // code InvalidAccessPointAliasError is returned. For more information about
@@ -2764,6 +3060,8 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon
 // GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
 //
+// This operation is not supported by directory buckets.
+// // This implementation of the GET action returns an analytics configuration // (identified by the analytics configuration ID) from the bucket. // @@ -2857,6 +3155,8 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // GetBucketCors API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the Cross-Origin Resource Sharing (CORS) configuration information // set for the bucket. // @@ -2864,10 +3164,10 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // action. By default, the bucket owner has this permission and can grant it // to others. // -// To use this API operation against an access point, provide the alias of the -// access point in place of the bucket name. +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. // -// To use this API operation against an Object Lambda access point, provide +// When you use this API operation with an Object Lambda access point, provide // the alias of the Object Lambda access point in place of the bucket name. // If the Object Lambda access point alias in a request is not valid, the error // code InvalidAccessPointAliasError is returned. For more information about @@ -2953,6 +3253,8 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r // GetBucketEncryption API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the default encryption configuration for an Amazon S3 bucket. By // default, all buckets have a default encryption configuration that uses server-side // encryption with Amazon S3 managed keys (SSE-S3). For information about the @@ -3043,6 +3345,8 @@ func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketInt // GetBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Gets the S3 Intelligent-Tiering configuration from the specified bucket. // // The S3 Intelligent-Tiering storage class is designed to optimize storage @@ -3141,6 +3445,8 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns an inventory configuration (identified by the inventory configuration // ID) from the bucket. // @@ -3242,6 +3548,8 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // see the updated version of this topic. This topic is provided for backward // compatibility. // +// This operation is not supported by directory buckets. +// // Returns the lifecycle configuration information set on the bucket. For information // about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). // @@ -3340,6 +3648,8 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Bucket lifecycle configuration now supports specifying a lifecycle rule using // an object key name prefix, one or more object tags, or a combination of both. 
// Accordingly, this section describes the latest API. The response describes @@ -3442,14 +3752,16 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque // GetBucketLocation API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the Region the bucket resides in. You set the bucket's Region using // the LocationConstraint request parameter in a CreateBucket request. For more // information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). // -// To use this API operation against an access point, provide the alias of the -// access point in place of the bucket name. +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. // -// To use this API operation against an Object Lambda access point, provide +// When you use this API operation with an Object Lambda access point, provide // the alias of the Object Lambda access point in place of the bucket name. // If the Object Lambda access point alias in a request is not valid, the error // code InvalidAccessPointAliasError is returned. For more information about @@ -3536,6 +3848,8 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request // GetBucketLogging API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the logging status of a bucket and the permissions users have to // view and modify that status. // @@ -3616,6 +3930,8 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Gets a metrics configuration (specified by the metrics configuration ID) // from the bucket. Note that this doesn't include the daily storage metrics. // @@ -3714,6 +4030,8 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // GetBucketNotification API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // No longer used, see GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3791,6 +4109,8 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the notification configuration of a bucket. // // If notifications are not enabled on the bucket, the action returns an empty @@ -3801,10 +4121,10 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // to other users to read this configuration with the s3:GetBucketNotification // permission. // -// To use this API operation against an access point, provide the alias of the -// access point in place of the bucket name. +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. 
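As a quick sketch of the notification lookup documented above (reusing the svc client and placeholder bucket from the earlier DeleteBucket example): an empty result simply means notifications are not enabled on the bucket. Note the v1 SDK's quirk that the input type here is named GetBucketNotificationConfigurationRequest.

```go
out, err := svc.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
	Bucket: aws.String("examplebucket"),
})
if err != nil {
	log.Fatal(err)
}
// Empty slices mean no notification targets are configured.
for _, q := range out.QueueConfigurations {
	fmt.Println("SQS target:", aws.StringValue(q.QueueArn))
}
for _, t := range out.TopicConfigurations {
	fmt.Println("SNS target:", aws.StringValue(t.TopicArn))
}
```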
 //
-// To use this API operation against an Object Lambda access point, provide
+// When you use this API operation with an Object Lambda access point, provide
 // the alias of the Object Lambda access point in place of the bucket name.
 // If the Object Lambda access point alias in a request is not valid, the error
 // code InvalidAccessPointAliasError is returned. For more information about
@@ -3889,6 +4209,8 @@ func (c *S3) GetBucketOwnershipControlsRequest(input *GetBucketOwnershipControls
 // GetBucketOwnershipControls API operation for Amazon Simple Storage Service.
 //
+// This operation is not supported by directory buckets.
+//
 // Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation,
 // you must have the s3:GetBucketOwnershipControls permission. For more information
 // about Amazon S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html).
@@ -3973,10 +4295,21 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R
 // GetBucketPolicy API operation for Amazon Simple Storage Service.
 //
-// Returns the policy of a specified bucket. If you are using an identity other
-// than the root user of the Amazon Web Services account that owns the bucket,
-// the calling identity must have the GetBucketPolicy permissions on the specified
-// bucket and belong to the bucket owner's account in order to use this operation.
+// Returns the policy of a specified bucket.
+//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
+// . Virtual-hosted-style requests aren't supported. For more information, see
+// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// # Permissions
+//
+// If you are using an identity other than the root user of the Amazon Web Services
+// account that owns the bucket, the calling identity must both have the GetBucketPolicy
+// permissions on the specified bucket and belong to the bucket owner's account
+// in order to use this operation.
 //
 // If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access
 // Denied error. If you have the correct permissions, but you're not using an
@@ -3991,17 +4324,33 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R
 // these API actions by VPC endpoint policies and Amazon Web Services Organizations
 // policies.
 //
-// To use this API operation against an access point, provide the alias of the
-// access point in place of the bucket name.
+// - General purpose bucket permissions - The s3:GetBucketPolicy permission
+// is required in a policy. For more information about general purpose
+// bucket policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
+// in the Amazon S3 User Guide.
 //
-// To use this API operation against an Object Lambda access point, provide
-// the alias of the Object Lambda access point in place of the bucket name.
-// If the Object Lambda access point alias in a request is not valid, the error
-// code InvalidAccessPointAliasError is returned.
For more information about -// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// - Directory bucket permissions - To grant access to this API operation, +// you must have the s3express:GetBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web +// Services account that owns the resource. For more information about directory +// bucket policies and permissions, see Amazon Web Services Identity and +// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # Example bucket policies +// +// General purpose buckets example bucket policies - See Bucket policy examples +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// Directory bucket example bucket policies - See Example bucket policies for +// S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax // -// For more information about bucket policies, see Using Bucket Policies and -// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. // // The following action is related to GetBucketPolicy: // @@ -4078,6 +4427,8 @@ func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (re // GetBucketPolicyStatus API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Retrieves the policy status for an Amazon S3 bucket, indicating whether the // bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus // permission. For more information about Amazon S3 permissions, see Specifying @@ -4167,6 +4518,8 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // GetBucketReplication API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the replication configuration of a bucket. // // It can take a while to propagate the put or delete a replication configuration @@ -4264,6 +4617,8 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // GetBucketRequestPayment API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the request payment configuration of a bucket. To use this version // of the operation, you must be the bucket owner. For more information, see // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). @@ -4343,6 +4698,8 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // GetBucketTagging API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the tag set associated with the bucket. 
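A sketch of reading that tag set with this SDK (reusing svc and the placeholder bucket; awserr is github.com/aws/aws-sdk-go/aws/awserr). One assumption worth flagging: a bucket with no tags surfaces as an error rather than an empty set, commonly observed with the NoSuchTagSet code, so callers tend to special-case it.

```go
out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
	Bucket: aws.String("examplebucket"),
})
if err != nil {
	// "NoSuchTagSet" is the code usually seen for an untagged bucket;
	// treat this string as an assumption to verify against the service.
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchTagSet" {
		fmt.Println("bucket has no tags")
		return
	}
	log.Fatal(err)
}
for _, tag := range out.TagSet {
	fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
}
```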
// // To use this operation, you must have permission to perform the s3:GetBucketTagging @@ -4431,6 +4788,8 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r // GetBucketVersioning API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the versioning state of a bucket. // // To retrieve the versioning state of a bucket, you must be the bucket owner. @@ -4518,6 +4877,8 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // GetBucketWebsite API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the website configuration for a bucket. To host website on Amazon // S3, you can configure a bucket as website by adding a website configuration. // For more information about hosting websites, see Hosting Websites on Amazon @@ -4605,113 +4966,106 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // GetObject API operation for Amazon Simple Storage Service. // -// Retrieves objects from Amazon S3. To use GET, you must have READ access to -// the object. If you grant READ access to the anonymous user, you can return -// the object without using an authorization header. -// -// An Amazon S3 bucket has no directory hierarchy such as you would find in -// a typical computer file system. You can, however, create a logical hierarchy -// by using object key names that imply a folder structure. For example, instead -// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. -// -// To get an object from such a logical hierarchy, specify the full key name -// for the object in the GET operation. For a virtual hosted-style request example, -// if you have the object photos/2006/February/sample.jpg, specify the resource -// as /photos/2006/February/sample.jpg. For a path-style request example, if -// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, -// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For -// more information about request types, see HTTP Host Header Bucket Specification -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). -// -// For more information about returning the ACL of an object, see GetObjectAcl -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). +// Retrieves an object from Amazon S3. // -// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval -// or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive -// or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the -// object you must first restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). -// Otherwise, this action returns an InvalidObjectState error. For information -// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). -// -// Encryption request headers, like x-amz-server-side-encryption, should not -// be sent for GET requests if your object uses server-side encryption with -// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption -// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with -// Amazon S3 managed encryption keys (SSE-S3). 
If your object does use these -// types of keys, you’ll get an HTTP 400 Bad Request error. -// -// If you encrypt an object by using server-side encryption with customer-provided -// encryption keys (SSE-C) when you store the object in Amazon S3, then when -// you GET the object, you must use the following headers: +// In the GetObject request, specify the full key name for the object. // -// - x-amz-server-side-encryption-customer-algorithm -// -// - x-amz-server-side-encryption-customer-key -// -// - x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided -// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). -// -// Assuming you have the relevant permission to read object tags, the response -// also returns the x-amz-tagging-count header that provides the count of number -// of tags associated with the object. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// to retrieve the tag set associated with an object. -// -// # Permissions -// -// You need the relevant read object (or version) permission for this operation. -// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// If the object that you request doesn’t exist, the error that Amazon S3 -// returns depends on whether you also have the s3:ListBucket permission. -// -// If you have the s3:ListBucket permission on the bucket, Amazon S3 returns -// an HTTP status code 404 (Not Found) error. +// General purpose buckets - Both the virtual-hosted-style requests and the +// path-style requests are supported. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg, specify the object +// key name as /photos/2006/February/sample.jpg. For a path-style request example, +// if you have the object photos/2006/February/sample.jpg in the bucket named +// examplebucket, specify the object key name as /examplebucket/photos/2006/February/sample.jpg. +// For more information about request types, see HTTP Host Header Bucket Specification +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) +// in the Amazon S3 User Guide. // -// If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 ("access denied") error. +// Directory buckets - Only virtual-hosted-style requests are supported. For +// a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg +// in the bucket named examplebucket--use1-az5--x-s3, specify the object key +// name as /photos/2006/February/sample.jpg. Also, when you make requests to +// this API operation, your requests are sent to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// # Versioning +// Permissions +// +// - General purpose bucket permissions - You must have the required permissions +// in a policy. To use GetObject, you must have the READ access to the object +// (or version). 
If you grant READ access to the anonymous user, the GetObject +// operation returns the object without using an authorization header. For +// more information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If you include a versionId in your request +// header, you must have the s3:GetObjectVersion permission to access a specific +// version of an object. The s3:GetObject permission is not required in this +// scenario. If you request the current version of an object without a specific +// versionId in the request header, only the s3:GetObject permission is required. +// The s3:GetObjectVersion permission is not required in this scenario. If +// the object that you request doesn’t exist, the error that Amazon S3 +// returns depends on whether you also have the s3:ListBucket permission. +// If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket +// permission, Amazon S3 returns an HTTP status code 403 Access Denied error. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Storage classes // -// By default, the GET action returns the current version of an object. To return -// a different version, use the versionId subresource. +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering +// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, +// before you can retrieve the object you must first restore a copy using RestoreObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this operation returns an InvalidObjectState error. For information +// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. // -// - If you supply a versionId, you need the s3:GetObjectVersion permission -// to access a specific version of an object. If you request a specific version, -// you do not need to have the s3:GetObject permission. If you request the -// current version without a specific version ID, only s3:GetObject permission -// is required. s3:GetObjectVersion permission won't be required. +// Directory buckets - For directory buckets, only the S3 Express One Zone storage +// class is supported to store newly created objects. 
Unsupported storage class
+// values won't write a destination object and will respond with the HTTP status
+// code 400 Bad Request.
+//
+// # Encryption
+//
+// Encryption request headers, like x-amz-server-side-encryption, should not
+// be sent for GetObject requests if your object uses server-side encryption
+// with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with
+// Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption
+// with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in
+// your GetObject requests for an object that uses these types of keys, you’ll
+// get an HTTP 400 Bad Request error.
 //
-// # Overriding Response Header Values
+// # Overriding response header values through the request
 //
 // There are times when you want to override certain response header values
-// in a GET response. For example, you might override the Content-Disposition
-// response header value in your GET request.
-//
-// You can override values for a set of response headers using the following
-// query parameters. These response header values are sent only on a successful
-// request, that is, when status code 200 OK is returned. The set of headers
-// you can override using these parameters is a subset of the headers that Amazon
-// S3 accepts when you create an object. The response headers that you can override
-// for the GET response are Content-Type, Content-Language, Expires, Cache-Control,
-// Content-Disposition, and Content-Encoding. To override these header values
-// in the GET response, you use the following request parameters.
-//
-// You must sign the request, either using an Authorization header or a presigned
-// URL, when using these parameters. They cannot be used with an unsigned (anonymous)
-// request.
+// of a GetObject response. For example, you might override the Content-Disposition
+// response header value through your GetObject request.
 //
-// - response-content-type
+// You can override values for a set of response headers. These modified response
+// header values are included only in a successful response, that is, when the
+// HTTP status code 200 OK is returned. The headers you can override using the
+// following query parameters in the request are a subset of the headers that
+// Amazon S3 accepts when you create an object.
 //
-// - response-content-language
+// The response headers that you can override for the GetObject response are
+// Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type,
+// and Expires.
 //
-// - response-expires
+// To override values for a set of response headers in the GetObject response,
+// you can use the following query parameters in the request.
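To illustrate the mechanism before the parameter list that continues below: each of these query parameters maps to a Response* field on GetObjectInput in this SDK, so a sketch of overriding a couple of headers looks like this (reusing svc; the object key is a placeholder).

```go
out, err := svc.GetObject(&s3.GetObjectInput{
	Bucket:                     aws.String("examplebucket"),
	Key:                        aws.String("photos/2006/February/sample.jpg"),
	ResponseCacheControl:       aws.String("no-cache"),
	ResponseContentType:        aws.String("application/octet-stream"),
	ResponseContentDisposition: aws.String(`attachment; filename="sample.jpg"`),
})
if err != nil {
	log.Fatal(err)
}
defer out.Body.Close()
// On a 200 OK response, fields such as out.ContentType reflect the
// overridden values rather than what is stored with the object.
```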
// // - response-cache-control // @@ -4719,17 +5073,19 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // // - response-content-encoding // -// # Overriding Response Header Values +// - response-content-language +// +// - response-content-type +// +// - response-expires // -// If both of the If-Match and If-Unmodified-Since headers are present in the -// request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since -// condition evaluates to false; then, S3 returns 200 OK and the data requested. +// When you use these parameters, you must sign the request by using either +// an Authorization header or a presigned URL. These parameters cannot be used +// with an unsigned (anonymous) request. // -// If both of the If-None-Match and If-Modified-Since headers are present in -// the request as follows:If-None-Match condition evaluates to false, and; If-Modified-Since -// condition evaluates to true; then, S3 returns 304 Not Modified response code. +// # HTTP Host header syntax // -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following operations are related to GetObject: // @@ -4752,6 +5108,15 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // - ErrCodeInvalidObjectState "InvalidObjectState" // Object is archived and inaccessible until restored. // +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering +// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, +// before you can retrieve the object you must first restore a copy using RestoreObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this operation returns an InvalidObjectState error. For information +// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { req, out := c.GetObjectRequest(input) @@ -4817,13 +5182,15 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // GetObjectAcl API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the access control list (ACL) of an object. To use this operation, // you must have s3:GetObjectAcl permissions or READ_ACP access to the object. // For more information, see Mapping of ACL permissions and access policy permissions // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) // in the Amazon S3 User Guide // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // By default, GET returns ACL information about the current version of an object. // To return ACL information about a different version, use the versionId subresource. @@ -4921,16 +5288,65 @@ func (c *S3) GetObjectAttributesRequest(input *GetObjectAttributesInput) (req *r // GetObjectAttributes API operation for Amazon Simple Storage Service. 
// // Retrieves all the metadata from an object without returning the object itself. -// This action is useful if you're interested only in an object's metadata. -// To use GetObjectAttributes, you must have READ access to the object. +// This operation is useful if you're interested only in an object's metadata. // // GetObjectAttributes combines the functionality of HeadObject and ListParts. // All of the data returned with each of those individual calls can be returned // with a single call to GetObjectAttributes. // +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - To use GetObjectAttributes, you +// must have READ access to the object. The permissions that you need to +// use this operation with depend on whether the bucket is versioned. If +// the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes +// permissions for this operation. If the bucket is not versioned, you need +// the s3:GetObject and s3:GetObjectAttributes permissions. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If the object that you request does not exist, +// the error Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. If you have the s3:ListBucket permission on the bucket, Amazon +// S3 returns an HTTP status code 404 Not Found ("no such key") error. If +// you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden ("access denied") error. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Encryption +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for HEAD requests if your object uses server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with +// Amazon S3 managed encryption keys (SSE-S3). 
The x-amz-server-side-encryption +// header is used when you PUT an object to S3 and want to specify the encryption +// method. If you include this header in a GET request for an object that uses +// these types of keys, you’ll get an HTTP 400 Bad Request error. It's because +// the encryption method can't be changed when you retrieve the object. +// // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when -// you retrieve the metadata from the object, you must use the following headers: +// you retrieve the metadata from the object, you must use the following headers +// to provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: // // - x-amz-server-side-encryption-customer-algorithm // @@ -4942,47 +5358,35 @@ func (c *S3) GetObjectAttributesRequest(input *GetObjectAttributesInput) (req *r // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. // -// - Encryption request headers, such as x-amz-server-side-encryption, should -// not be sent for GET requests if your object uses server-side encryption -// with Amazon Web Services KMS keys stored in Amazon Web Services Key Management -// Service (SSE-KMS) or server-side encryption with Amazon S3 managed keys -// (SSE-S3). If your object does use these types of keys, you'll get an HTTP -// 400 Bad Request error. +// Directory bucket permissions - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. // -// - The last modified property in this case is the creation date of the -// object. +// # Versioning +// +// Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID is +// supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. +// +// # Conditional request headers // // Consider the following when using request headers: // // - If both of the If-Match and If-Unmodified-Since headers are present // in the request as follows, then Amazon S3 returns the HTTP status code // 200 OK and the data requested: If-Match condition evaluates to true. If-Unmodified-Since -// condition evaluates to false. +// condition evaluates to false. For more information about conditional requests, +// see RFC 7232 (https://tools.ietf.org/html/rfc7232). // // - If both of the If-None-Match and If-Modified-Since headers are present // in the request as follows, then Amazon S3 returns the HTTP status code // 304 Not Modified: If-None-Match condition evaluates to false. If-Modified-Since -// condition evaluates to true. -// -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). -// -// # Permissions -// -// The permissions that you need to use this operation depend on whether the -// bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion -// and s3:GetObjectVersionAttributes permissions for this operation. If the -// bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes -// permissions. For more information, see Specifying Permissions in a Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. 
If the object that you request does not exist, -// the error Amazon S3 returns depends on whether you also have the s3:ListBucket -// permission. +// condition evaluates to true. For more information about conditional requests, +// see RFC 7232 (https://tools.ietf.org/html/rfc7232). // -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns -// an HTTP status code 404 Not Found ("no such key") error. +// # HTTP Host header syntax // -// - If you don't have the s3:ListBucket permission, Amazon S3 returns an -// HTTP status code 403 Forbidden ("access denied") error. +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following actions are related to GetObjectAttributes: // @@ -5078,10 +5482,12 @@ func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *req // GetObjectLegalHold API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Gets an object's current legal hold status. For more information, see Locking // Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // The following action is related to GetObjectLegalHold: // @@ -5158,6 +5564,8 @@ func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfiguration // GetObjectLockConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Gets the Object Lock configuration for a bucket. The rule specified in the // Object Lock configuration will be applied by default to every new object // placed in the specified bucket. For more information, see Locking Objects @@ -5238,10 +5646,12 @@ func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *req // GetObjectRetention API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Retrieves an object's retention settings. For more information, see Locking // Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // The following action is related to GetObjectRetention: // @@ -5318,6 +5728,8 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // GetObjectTagging API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the tag-set of an object. You send the GET request against the tagging // subresource associated with the object. // @@ -5413,6 +5825,8 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // GetObjectTorrent API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns torrent files from a bucket. BitTorrent can save you bandwidth when // you're distributing large files. // @@ -5422,7 +5836,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // // To use GET, you must have READ access to the object. // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. 
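As a rough sketch of the GetObjectAttributes semantics described above (the requested attribute names, and the 404-versus-403 behavior that depends on s3:ListBucket), using the v1 Go SDK client being vendored here; the region, bucket, and key are hypothetical placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
		Key:    aws.String("exampleobject"),       // hypothetical
		ObjectAttributes: []*string{
			aws.String(s3.ObjectAttributesEtag),
			aws.String(s3.ObjectAttributesObjectSize),
			aws.String(s3.ObjectAttributesStorageClass),
		},
	})
	if err != nil {
		// Per the doc comment above: 404 if the key is missing and the caller
		// has s3:ListBucket, 403 if the caller lacks s3:ListBucket.
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			log.Fatalf("status %d: %s", reqErr.StatusCode(), reqErr.Code())
		}
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.ETag), aws.Int64Value(out.ObjectSize))
}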
// // The following action is related to GetObjectTorrent: // @@ -5499,6 +5913,8 @@ func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req // GetPublicAccessBlock API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To // use this operation, you must have the s3:GetBucketPublicAccessBlock permission. // For more information about Amazon S3 permissions, see Specifying Permissions @@ -5590,39 +6006,63 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou output = &HeadBucketOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // HeadBucket API operation for Amazon Simple Storage Service. // -// This action is useful to determine if a bucket exists and you have permission -// to access it. The action returns a 200 OK if the bucket exists and you have -// permission to access it. +// You can use this operation to determine if a bucket exists and if you have +// permission to access it. The action returns a 200 OK if the bucket exists +// and you have permission to access it. // // If the bucket does not exist or you do not have permission to access it, // the HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 // Not Found code. A message body is not included, so you cannot determine the // exception beyond these error codes. // -// To use this operation, you must have permissions to perform the s3:ListBucket -// action. The bucket owner has this permission by default and can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// Directory buckets - You must make requests for this API operation to the +// Zonal endpoint. These endpoints support virtual-hosted-style requests in +// the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// To use this API operation against an access point, you must provide the alias -// of the access point in place of the bucket name or specify the access point -// ARN. When using the access point ARN, you must direct requests to the access -// point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. -// When using the Amazon Web Services SDKs, you provide the ARN in place of -// the bucket name. For more information, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html). +// # Authentication and authorization // -// To use this API operation against an Object Lambda access point, provide -// the alias of the Object Lambda access point in place of the bucket name. -// If the Object Lambda access point alias in a request is not valid, the error -// code InvalidAccessPointAliasError is returned. 
For more information about -// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// All HeadBucket requests must be authenticated and signed by using IAM credentials +// (access key ID and secret access key for the IAM identities). All headers +// with the x-amz- prefix, including x-amz-copy-source, must be signed. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// +// Directory bucket - You must use IAM credentials to authenticate and authorize +// your access to the HeadBucket API operation, instead of using the temporary +// security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization +// on your behalf. +// +// Permissions +// +// - General purpose bucket permissions - To use this operation, you must +// have permissions to perform the s3:ListBucket action. The bucket owner +// has this permission by default and can grant this permission to others. +// For more information about permissions, see Managing access permissions +// to your Amazon S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - You must have the s3express:CreateSession +// permission in the Action element of a policy. By default, the session +// is in the ReadWrite mode. If you want to restrict the access, you can +// explicitly set the s3express:SessionMode condition key to ReadOnly on +// the bucket. For more information about example bucket policies, see Example +// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5700,19 +6140,70 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // HeadObject API operation for Amazon Simple Storage Service. // -// The HEAD action retrieves metadata from an object without returning the object -// itself. This action is useful if you're only interested in an object's metadata. -// To use HEAD, you must have READ access to the object. +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're interested only in an object's +// metadata. +// +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response +// body. Because of this, if the HEAD request generates an error, it returns +// a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 +// Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not +// possible to retrieve the exact exception of these error codes. +// +// Request headers are limited to 8 KB in size. 
For more information, see Common +// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// A HEAD request has the same options as a GET action on an object. The response -// is identical to the GET response except that there is no response body. Because -// of this, if the HEAD request generates an error, it returns a generic 400 -// Bad Request, 403 Forbidden or 404 Not Found code. It is not possible to retrieve -// the exact exception beyond these error codes. +// Permissions +// +// - General purpose bucket permissions - To use HEAD, you must have the +// s3:GetObject permission. You need the relevant read object (or version) +// permission for this operation. For more information, see Actions, resources, +// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) +// in the Amazon S3 User Guide. If the object you request doesn't exist, +// the error that Amazon S3 returns depends on whether you also have the +// s3:ListBucket permission. If you have the s3:ListBucket permission on +// the bucket, Amazon S3 returns an HTTP status code 404 Not Found error. +// If you don’t have the s3:ListBucket permission, Amazon S3 returns an +// HTTP status code 403 Forbidden error. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Encryption +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for HEAD requests if your object uses server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with +// Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption +// header is used when you PUT an object to S3 and want to specify the encryption +// method. If you include this header in a HEAD request for an object that uses +// these types of keys, you’ll get an HTTP 400 Bad Request error. It's because +// the encryption method can't be changed when you retrieve the object. 
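A minimal HeadObject sketch matching the behavior described above: no encryption request headers are sent for an SSE-KMS, DSSE-KMS, or SSE-S3 object, and failures surface only as generic HTTP status codes. The region, bucket, and key names are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// No x-amz-server-side-encryption header is set here, even if the object
	// is encrypted with SSE-KMS, DSSE-KMS, or SSE-S3.
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
		Key:    aws.String("exampleobject"),       // hypothetical
	})
	if err != nil {
		// HEAD has no response body, so only a generic status code
		// (400, 403, 404, 405, 412, or 304) is available.
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			log.Fatalf("HEAD failed with status %d", reqErr.StatusCode())
		}
		log.Fatal(err)
	}
	fmt.Println(aws.Int64Value(out.ContentLength), aws.StringValue(out.ServerSideEncryption))
}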
// // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when -// you retrieve the metadata from the object, you must use the following headers: +// you retrieve the metadata from the object, you must use the following headers +// to provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: // // - x-amz-server-side-encryption-customer-algorithm // @@ -5721,48 +6212,32 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // - x-amz-server-side-encryption-customer-key-MD5 // // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided -// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). -// -// - Encryption request headers, like x-amz-server-side-encryption, should -// not be sent for GET requests if your object uses server-side encryption -// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side -// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side -// encryption with Amazon S3 managed encryption keys (SSE-S3). If your object -// does use these types of keys, you’ll get an HTTP 400 Bad Request error. -// -// - The last modified property in this case is the creation date of the -// object. -// -// Request headers are limited to 8 KB in size. For more information, see Common -// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. // -// Consider the following when using request headers: +// Directory bucket permissions - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. // -// - Consideration 1 – If both of the If-Match and If-Unmodified-Since -// headers are present in the request as follows: If-Match condition evaluates -// to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon -// S3 returns 200 OK and the data requested. +// Versioning // -// - Consideration 2 – If both of the If-None-Match and If-Modified-Since -// headers are present in the request as follows: If-None-Match condition -// evaluates to false, and; If-Modified-Since condition evaluates to true; -// Then Amazon S3 returns the 304 Not Modified response code. +// - If the current version of the object is a delete marker, Amazon S3 behaves +// as if the object was deleted and includes x-amz-delete-marker: true in +// the response. // -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// - If the specified version is a delete marker, the response returns a +// 405 Method Not Allowed error and the Last-Modified: timestamp response +// header. // -// # Permissions +// - Directory buckets - Delete marker is not supported by directory buckets. // -// You need the relevant read object (or version) permission for this operation. -// For more information, see Actions, resources, and condition keys for Amazon -// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). -// If the object you request doesn't exist, the error that Amazon S3 returns -// depends on whether you also have the s3:ListBucket permission. 
+// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID +// is supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. // -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns -// an HTTP status code 404 error. +// # HTTP Host header syntax // -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns -// an HTTP status code 403 error. +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following actions are related to HeadObject: // @@ -5844,6 +6319,8 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Lists the analytics configurations for the bucket. You can have up to 1,000 // analytics configurations per bucket. // @@ -5943,6 +6420,8 @@ func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucket // ListBucketIntelligentTieringConfigurations API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Lists the S3 Intelligent-Tiering configuration from the specified bucket. // // The S3 Intelligent-Tiering storage class is designed to optimize storage @@ -6041,6 +6520,8 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns a list of inventory configurations for the bucket. You can have up // to 1,000 inventory configurations per bucket. // @@ -6140,6 +6621,8 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Lists the metrics configurations for the bucket. The metrics configurations // are only for the request metrics of the bucket and do not provide information // on daily storage metrics. You can have up to 1,000 configurations per bucket. @@ -6240,6 +6723,8 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, // ListBuckets API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns a list of all buckets owned by the authenticated sender of the request. // To use this operation, you must have the s3:ListAllMyBuckets permission. // @@ -6274,6 +6759,160 @@ func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, op return out, req.Send() } +const opListDirectoryBuckets = "ListDirectoryBuckets" + +// ListDirectoryBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListDirectoryBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDirectoryBuckets for more information on using the ListDirectoryBuckets +// API call, and error handling.
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListDirectoryBucketsRequest method. +// req, resp := client.ListDirectoryBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBuckets +func (c *S3) ListDirectoryBucketsRequest(input *ListDirectoryBucketsInput) (req *request.Request, output *ListDirectoryBucketsOutput) { + op := &request.Operation{ + Name: opListDirectoryBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"ContinuationToken"}, + LimitToken: "MaxDirectoryBuckets", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDirectoryBucketsInput{} + } + + output = &ListDirectoryBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDirectoryBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all Amazon S3 directory buckets owned by the authenticated +// sender of the request. For more information about directory buckets, see +// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// You must have the s3express:ListAllMyDirectoryBuckets permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to +// this API operation isn't supported. This operation can only be performed +// by the Amazon Web Services account that owns the resource. For more information +// about directory bucket policies and permissions, see Amazon Web Services +// Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListDirectoryBuckets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBuckets +func (c *S3) ListDirectoryBuckets(input *ListDirectoryBucketsInput) (*ListDirectoryBucketsOutput, error) { + req, out := c.ListDirectoryBucketsRequest(input) + return out, req.Send() +} + +// ListDirectoryBucketsWithContext is the same as ListDirectoryBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListDirectoryBuckets for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListDirectoryBucketsWithContext(ctx aws.Context, input *ListDirectoryBucketsInput, opts ...request.Option) (*ListDirectoryBucketsOutput, error) { + req, out := c.ListDirectoryBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDirectoryBucketsPages iterates over the pages of a ListDirectoryBuckets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDirectoryBuckets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDirectoryBuckets operation. +// pageNum := 0 +// err := client.ListDirectoryBucketsPages(params, +// func(page *s3.ListDirectoryBucketsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListDirectoryBucketsPages(input *ListDirectoryBucketsInput, fn func(*ListDirectoryBucketsOutput, bool) bool) error { + return c.ListDirectoryBucketsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDirectoryBucketsPagesWithContext same as ListDirectoryBucketsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListDirectoryBucketsPagesWithContext(ctx aws.Context, input *ListDirectoryBucketsInput, fn func(*ListDirectoryBucketsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDirectoryBucketsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDirectoryBucketsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDirectoryBucketsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListMultipartUploads = "ListMultipartUploads" // ListMultipartUploadsRequest generates a "aws/request.Request" representing the @@ -6323,28 +6962,79 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // ListMultipartUploads API operation for Amazon Simple Storage Service. // -// This action lists in-progress multipart uploads. An in-progress multipart -// upload is a multipart upload that has been initiated using the Initiate Multipart -// Upload request, but has not yet been completed or aborted. +// This operation lists in-progress multipart uploads in a bucket. An in-progress +// multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload +// request, but has not yet been completed or aborted. +// +// Directory buckets - If multipart uploads in a directory bucket are in progress, +// you can't delete the bucket until all the in-progress multipart uploads are +// aborted or completed. 
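The ListDirectoryBucketsPages helper added above drives the ContinuationToken/MaxDirectoryBuckets paginator for you, so callers never touch the token directly. A small sketch, assuming a hypothetical region:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	err := svc.ListDirectoryBucketsPages(
		&s3.ListDirectoryBucketsInput{MaxDirectoryBuckets: aws.Int64(100)},
		func(page *s3.ListDirectoryBucketsOutput, lastPage bool) bool {
			for _, b := range page.Buckets {
				fmt.Println(aws.StringValue(b.Name))
			}
			return true // keep fetching pages until lastPage
		})
	if err != nil {
		log.Fatal(err)
	}
}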
+// +// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads +// in the response. The limit of 1,000 multipart uploads is also the default +// value. You can further limit the number of uploads in a response by specifying +// the max-uploads request parameter. If there are more than 1,000 multipart +// uploads that satisfy your ListMultipartUploads request, the response returns +// an IsTruncated element with the value of true, a NextKeyMarker element, and +// a NextUploadIdMarker element. To list the remaining multipart uploads, you +// need to make subsequent ListMultipartUploads requests. In these requests, +// include two query parameters: key-marker and upload-id-marker. Set the value +// of key-marker to the NextKeyMarker value from the previous response. Similarly, +// set the value of upload-id-marker to the NextUploadIdMarker value from the +// previous response. +// +// Directory buckets - The upload-id-marker element and the NextUploadIdMarker +// element aren't supported by directory buckets. To list the additional multipart +// uploads, you only need to set the value of key-marker to the NextKeyMarker +// value from the previous response. +// +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. // -// This action returns at most 1,000 multipart uploads in the response. 1,000 -// multipart uploads is the maximum number of uploads a response can include, -// which is also the default value. You can further limit the number of uploads -// in a response by specifying the max-uploads parameter in the response. If -// additional multipart uploads satisfy the list criteria, the response will -// contain an IsTruncated element with the value true. To list the additional -// multipart uploads, use the key-marker and upload-id-marker request parameters. +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// In the response, the uploads are sorted by key. If your application has initiated -// more than one multipart upload using the same object key, then uploads in -// the response are first sorted by key. Additionally, uploads are sorted in -// ascending order within each key by the upload initiation time. +// Permissions // -// For more information on multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. 
Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// Sorting of multipart uploads in response +// +// - General purpose bucket - In the ListMultipartUploads response, the multipart +// uploads are sorted based on two criteria: Key-based sorting - Multipart +// uploads are initially sorted in ascending order based on their object +// keys. Time-based sorting - For uploads that share the same object key, +// they are further sorted in ascending order based on the upload initiation +// time. Among uploads with the same key, the one that was initiated first +// will appear before the ones that were initiated later. // -// For information on permissions required to use the multipart upload API, -// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// - Directory bucket - In the ListMultipartUploads response, the multipart +// uploads aren't sorted lexicographically based on the object keys. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following operations are related to ListMultipartUploads: // @@ -6486,6 +7176,8 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // ListObjectVersions API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns metadata about all versions of the objects in a bucket. You can also // use request parameters as selection criteria to return metadata about a subset // of all the object versions. @@ -6498,8 +7190,6 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // // To use this operation, you must have READ access to the bucket. // -// This action is not supported by Amazon S3 on Outposts. -// // The following operations are related to ListObjectVersions: // // - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) @@ -6638,6 +7328,8 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // ListObjects API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns some or all (up to 1,000) of the objects in a bucket. You can use // the request parameters as selection criteria to return a subset of the objects // in a bucket. A 200 OK response can contain valid or invalid XML. Be sure @@ -6798,28 +7490,58 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // You can use the request parameters as selection criteria to return a subset // of the objects in a bucket. A 200 OK response can contain valid or invalid // XML. Make sure to design your application to parse the contents of the response -// and handle it appropriately. 
Objects are returned sorted in an ascending -// order of the respective key names in the list. For more information about -// listing objects, see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// and handle it appropriately. For more information about listing objects, +// see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) // in the Amazon S3 User Guide. // -// To use this operation, you must have READ access to the bucket. +// Permissions // -// To use this action in an Identity and Access Management (IAM) policy, you -// must have permission to perform the s3:ListBucket action. The bucket owner -// has this permission by default and can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. +// - General purpose bucket permissions - To use this operation, you must +// have READ access to the bucket. You must have permission to perform the +// s3:ListBucket action. The bucket owner has this permission by default +// and can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). 
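Tying the permissions discussion above to code, a minimal ListObjectsV2 pagination sketch against a general purpose bucket; the region, bucket, and prefix are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	err := svc.ListObjectsV2Pages(
		&s3.ListObjectsV2Input{
			Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
			Prefix: aws.String("logs/"),               // hypothetical
		},
		func(page *s3.ListObjectsV2Output, lastPage bool) bool {
			for _, obj := range page.Contents {
				fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
			}
			return true // continue until the last page
		})
	if err != nil {
		log.Fatal(err)
	}
}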
+// +// Sorting order of returned objects +// +// - General purpose bucket - For general purpose buckets, ListObjectsV2 +// returns objects in lexicographical order based on their key names. +// +// - Directory bucket - For directory buckets, ListObjectsV2 does not return +// objects in lexicographical order. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // This section describes the latest revision of this action. We recommend that // you use this revised API operation for application development. For backward // compatibility, Amazon S3 continues to support the prior version of this API // operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). // -// To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). -// // The following operations are related to ListObjectsV2: // // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) @@ -6962,24 +7684,58 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // ListParts API operation for Amazon Simple Storage Service. // // Lists the parts that have been uploaded for a specific multipart upload. -// This operation must include the upload ID, which you obtain by sending the -// initiate multipart upload request (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). -// This request returns a maximum of 1,000 uploaded parts. The default number -// of parts returned is 1,000 parts. You can restrict the number of parts returned -// by specifying the max-parts request parameter. If your multipart upload consists -// of more than 1,000 parts, the response returns an IsTruncated field with -// the value of true, and a NextPartNumberMarker element. In subsequent ListParts -// requests you can include the part-number-marker query string parameter and -// set its value to the NextPartNumberMarker field value from the previous response. -// -// If the upload was created using a checksum algorithm, you will need to have -// permission to the kms:Decrypt action for the request to succeed. +// +// To use this operation, you must provide the upload ID in the request. You +// obtain this uploadID by sending the initiate multipart upload request through +// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// +// The ListParts request returns a maximum of 1,000 uploaded parts. The limit +// of 1,000 parts is also the default value. You can restrict the number of +// parts in a response by specifying the max-parts request parameter. If your +// multipart upload consists of more than 1,000 parts, the response returns +// an IsTruncated field with the value of true, and a NextPartNumberMarker element. +// To list remaining uploaded parts, in subsequent ListParts requests, include +// the part-number-marker query string parameter and set its value to the NextPartNumberMarker +// field value from the previous response. // // For more information on multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. 
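A sketch of the part-number-marker pagination described above, letting the SDK's ListPartsPages helper carry NextPartNumberMarker between requests; the bucket, key, and upload ID are hypothetical (the upload ID comes from a prior CreateMultipartUpload call):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	err := svc.ListPartsPages(
		&s3.ListPartsInput{
			Bucket:   aws.String("amzn-s3-demo-bucket"), // hypothetical
			Key:      aws.String("exampleobject"),       // hypothetical
			UploadId: aws.String("example-upload-id"),   // from CreateMultipartUpload
			MaxParts: aws.Int64(1000),                   // the default and the maximum
		},
		func(page *s3.ListPartsOutput, lastPage bool) bool {
			for _, p := range page.Parts {
				fmt.Println(aws.Int64Value(p.PartNumber), aws.StringValue(p.ETag))
			}
			return true
		})
	if err != nil {
		log.Fatal(err)
	}
}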
+// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// For information on permissions required to use the multipart upload API, -// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// Permissions +// +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. If the upload was created using server-side +// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer +// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you +// must have permission to the kms:Decrypt action for the ListParts request +// to succeed. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following operations are related to ListParts: // @@ -7118,6 +7874,8 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer // Acceleration is a bucket-level feature that enables you to perform faster // data transfers to Amazon S3. @@ -7230,9 +7988,11 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request // PutBucketAcl API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Sets the permissions on an existing bucket using access control lists (ACL). // For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). -// To set the ACL of a bucket, you must have WRITE_ACP permission. +// To set the ACL of a bucket, you must have the WRITE_ACP permission. 
// // You can use one of the following two ways to set a bucket's permissions: // @@ -7398,6 +8158,8 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Sets an analytics configuration for the bucket (specified by the analytics // configuration ID). You can have up to 1,000 analytics configurations per // bucket. @@ -7520,6 +8282,8 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // PutBucketCors API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Sets the cors configuration for your bucket. If the configuration exists, // Amazon S3 replaces it. // @@ -7640,21 +8404,21 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // PutBucketEncryption API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // This action uses the encryption subresource to configure default encryption // and Amazon S3 Bucket Keys for an existing bucket. // // By default, all buckets have a default encryption configuration that uses // server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally // configure default encryption for a bucket by using server-side encryption -// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side -// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption -// with customer-provided keys (SSE-C). If you specify default encryption by -// using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information -// about bucket default encryption, see Amazon S3 bucket default encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see -// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon S3 User Guide. +// with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side +// encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default +// encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html). If you +// use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 +// does not validate the KMS key ID provided in PutBucketEncryption requests. // // This action requires Amazon Web Services Signature Version 4. For more information, // see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). @@ -7744,6 +8508,8 @@ func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketInt // PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Puts a S3 Intelligent-Tiering configuration to the specified bucket. You // can have up to 1,000 S3 Intelligent-Tiering configurations per bucket. 
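The PutBucketEncryption change above warns that Amazon S3 does not validate the KMS key ID supplied in the request, so the caller must verify it. A minimal SSE-KMS default-encryption sketch; the region, bucket, and key ARN are hypothetical:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAwsKms),
					// S3 does not validate this ID; double-check it yourself.
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/example"), // hypothetical
				},
				BucketKeyEnabled: aws.Bool(true), // reduce KMS request costs
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}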
// @@ -7869,6 +8635,8 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // This implementation of the PUT action adds an inventory configuration (identified // by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations // per bucket. @@ -8023,6 +8791,8 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // PutBucketLifecycle API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // For an updated version of this API, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html). // This version has been deprecated. Existing lifecycle configurations will // work. For new lifecycle configurations, use the updated API. @@ -8152,6 +8922,8 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Creates a new lifecycle configuration for the bucket or replaces an existing // lifecycle configuration. Keep in mind that this will overwrite an existing // lifecycle configuration, so if you want to retain any configuration details, @@ -8295,6 +9067,8 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request // PutBucketLogging API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Set the logging parameters for a bucket and to specify permissions for who // can view and modify the logging parameters. All logs are saved to buckets // in the same Amazon Web Services Region as the source bucket. To set the logging @@ -8422,6 +9196,8 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Sets a metrics configuration (specified by the metrics configuration ID) // for the bucket. You can have up to 1,000 metrics configurations per bucket. // If you're updating an existing metrics configuration, note that this is a @@ -8532,6 +9308,8 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re // PutBucketNotification API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // No longer used, see the PutBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html) // operation. // @@ -8611,6 +9389,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Enables notifications of specified events for a bucket. For more information // about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). // @@ -8740,6 +9520,8 @@ func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControls // PutBucketOwnershipControls API operation for Amazon Simple Storage Service. 
// +// This operation is not supported by directory buckets. +// // Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this // operation, you must have the s3:PutBucketOwnershipControls permission. For // more information about Amazon S3 permissions, see Specifying permissions @@ -8830,11 +9612,21 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R // PutBucketPolicy API operation for Amazon Simple Storage Service. // -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using -// an identity other than the root user of the Amazon Web Services account that -// owns the bucket, the calling identity must have the PutBucketPolicy permissions -// on the specified bucket and belong to the bucket owner's account in order -// to use this operation. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// If you are using an identity other than the root user of the Amazon Web Services +// account that owns the bucket, the calling identity must both have the PutBucketPolicy +// permissions on the specified bucket and belong to the bucket owner's account +// in order to use this operation. // // If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access // Denied error. If you have the correct permissions, but you're not using an @@ -8849,7 +9641,33 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R // these API actions by VPC endpoint policies and Amazon Web Services Organizations // policies. // -// For more information, see Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html). +// - General purpose bucket permissions - The s3:PutBucketPolicy permission +// is required in a policy. For more information about general purpose buckets +// bucket policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation, +// you must have the s3express:PutBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web +// Services account that owns the resource. For more information about directory +// bucket policies and permissions, see Amazon Web Services Identity and +// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # Example bucket policies +// +// General purpose buckets example bucket policies - See Bucket policy examples +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. 
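A minimal general purpose bucket sketch of the PutBucketPolicy call described above; the account ID, bucket, and policy statement are hypothetical placeholders:

package main

import (
	"encoding/json"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	policy, err := json.Marshal(map[string]interface{}{
		"Version": "2012-10-17",
		"Statement": []map[string]interface{}{{
			"Effect":    "Allow",
			"Principal": map[string]string{"AWS": "arn:aws:iam::111122223333:root"}, // hypothetical
			"Action":    "s3:GetObject",
			"Resource":  "arn:aws:s3:::amzn-s3-demo-bucket/*", // hypothetical
		}},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The caller needs the s3:PutBucketPolicy permission and must belong to
	// the bucket owner's account, as described above.
	if _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
		Policy: aws.String(string(policy)),
	}); err != nil {
		log.Fatal(err)
	}
}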
+// +// Directory bucket example bucket policies - See Example bucket policies for +// S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. // // The following operations are related to PutBucketPolicy: // @@ -8933,6 +9751,8 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // PutBucketReplication API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Creates a replication configuration or replaces an existing one. For more // information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 User Guide. @@ -8941,6 +9761,9 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // configuration, you provide the name of the destination bucket or buckets // where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 // can assume to replicate objects on your behalf, and other relevant information. +// You can invoke this request for a specific Amazon Web Services Region by +// using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion) +// condition key. // // A replication configuration must include at least one rule, and can contain // a maximum of 1,000. Each rule identifies a subset of objects to replicate @@ -9069,6 +9892,8 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) // PutBucketRequestPayment API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Sets the request payment configuration for a bucket. By default, the bucket // owner pays for downloads from the bucket. This configuration parameter enables // the bucket owner (only) to specify that the person requesting the download @@ -9157,6 +9982,8 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // PutBucketTagging API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Sets the tags for a bucket. // // Use tags to organize your Amazon Web Services bill to reflect your own cost @@ -9167,7 +9994,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // name, and then organize your billing information to see the total cost of // that application across several services. For more information, see Cost // Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) -// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). +// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html). // // When this operation sets the tags for a bucket, it will overwrite any current // tags the bucket already has. 
You cannot use this operation to add tags to
@@ -9179,22 +10006,20 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
//
-// PutBucketTagging has the following special errors:
+// PutBucketTagging has the following special errors. For more Amazon S3 errors,
+// see Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html).
//
-// - Error code: InvalidTagError Description: The tag provided was not a
-// valid tag. This error can occur if the tag did not pass input validation.
-// For information about tag restrictions, see User-Defined Tag Restrictions
-// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
-// and Amazon Web Services-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html).
+// - InvalidTag - The tag provided was not a valid tag. This error can occur
+// if the tag did not pass input validation. For more information, see Using
+// Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html).
//
-// - Error code: MalformedXMLError Description: The XML provided does not
-// match the schema.
+// - MalformedXML - The XML provided does not match the schema.
//
-// - Error code: OperationAbortedError Description: A conflicting conditional
-// action is currently in progress against this resource. Please try again.
+// - OperationAborted - A conflicting conditional action is currently in
+// progress against this resource. Please try again.
//
-// - Error code: InternalError Description: The service was unable to apply
-// the provided tag to the bucket.
+// - InternalError - The service was unable to apply the provided tag to
+// the bucket.
//
// The following operations are related to PutBucketTagging:
//
@@ -9278,6 +10103,8 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r
// PutBucketVersioning API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Sets the versioning state of an existing bucket.
//
// You can set the versioning state with one of the following values:
@@ -9389,6 +10216,8 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request
// PutBucketWebsite API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Sets the configuration of the website that is specified in the website subresource.
// To configure a bucket as a website, you can add this subresource on the bucket
// with website configuration information such as the file name of the index
@@ -9456,6 +10285,8 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request
// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
// in the Amazon S3 User Guide.
//
+// The maximum request length is limited to 128 KB.
+//
// Returns awserr.Error for service API and SDK errors.
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -9527,87 +10358,83 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // PutObject API operation for Amazon Simple Storage Service. // -// Adds an object to a bucket. You must have WRITE permissions on a bucket to -// add an object to it. -// -// Amazon S3 never adds partial objects; if you receive a success response, -// Amazon S3 added the entire object to the bucket. You cannot use PutObject -// to only update a single piece of metadata for an existing object. You must -// put the entire object with updated metadata if you want to update some values. +// Adds an object to a bucket. // -// Amazon S3 is a distributed system. If it receives multiple write requests -// for the same object simultaneously, it overwrites all but the last object -// written. To prevent objects from being deleted or overwritten, you can use -// Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html). +// - Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. You cannot use PutObject +// to only update a single piece of metadata for an existing object. You +// must put the entire object with updated metadata if you want to update +// some values. // -// To ensure that data is not corrupted traversing the network, use the Content-MD5 -// header. When you use this header, Amazon S3 checks the object against the -// provided MD5 value and, if they do not match, returns an error. Additionally, -// you can calculate the MD5 while putting an object to Amazon S3 and compare -// the returned ETag to the calculated MD5 value. +// - If your bucket uses the bucket owner enforced setting for Object Ownership, +// ACLs are disabled and no longer affect permissions. All objects written +// to the bucket by any account will be owned by the bucket owner. // -// - To successfully complete the PutObject request, you must have the s3:PutObject -// in your IAM permissions. -// -// - To successfully change the objects acl of your PutObject request, you -// must have the s3:PutObjectAcl in your IAM permissions. -// -// - To successfully set the tag-set with your PutObject request, you must -// have the s3:PutObjectTagging in your IAM permissions. -// -// - The Content-MD5 header is required for any request to upload an object -// with a retention period configured using Amazon S3 Object Lock. For more -// information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) // in the Amazon S3 User Guide. // -// You have four mutually exclusive options to protect data using server-side -// encryption in Amazon S3, depending on how you choose to manage the encryption -// keys. 
Specifically, the encryption key options are Amazon S3 managed keys -// (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided -// keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using -// Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon -// S3 to encrypt data at rest by using server-side encryption with other key -// options. For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). -// -// When adding a new object, you can use headers to grant ACL-based permissions -// to individual Amazon Web Services accounts or to predefined groups defined -// by Amazon S3. These permissions are then added to the ACL on the object. -// By default, all objects are private. Only the owner has full access control. -// For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). -// -// If the bucket that you're uploading objects to uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. -// Buckets that use this setting only accept PUT requests that don't specify -// an ACL or PUT requests that specify bucket owner full control ACLs, such -// as the bucket-owner-full-control canned ACL or an equivalent form of this -// ACL expressed in the XML format. PUT requests that contain other ACLs (for -// example, custom grants to certain Amazon Web Services accounts) fail and -// return a 400 error with the error code AccessControlListNotSupported. For -// more information, see Controlling ownership of objects and disabling ACLs -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. -// -// If your bucket uses the bucket owner enforced setting for Object Ownership, -// all objects written to the bucket by any account will be owned by the bucket -// owner. -// -// By default, Amazon S3 uses the STANDARD Storage Class to store newly created -// objects. The STANDARD storage class provides high durability and high availability. -// Depending on performance needs, you can specify a different Storage Class. -// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, -// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) -// in the Amazon S3 User Guide. -// -// If you enable versioning for a bucket, Amazon S3 automatically generates -// a unique version ID for the object being stored. Amazon S3 returns this ID -// in the response. When you enable versioning for a bucket, if Amazon S3 receives -// multiple write requests for the same object simultaneously, it stores all -// of the objects. For more information about versioning, see Adding Objects -// to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). -// For information about returning the versioning state of a bucket, see GetBucketVersioning -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). +// Amazon S3 is a distributed system. If it receives multiple write requests +// for the same object simultaneously, it overwrites all but the last object +// written. 
However, Amazon S3 provides features that can modify this behavior:
+//
+// - S3 Object Lock - To prevent objects from being deleted or overwritten,
+// you can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html)
+// in the Amazon S3 User Guide. This functionality is not supported for directory
+// buckets.
+//
+// - S3 Versioning - When you enable versioning for a bucket, if Amazon S3
+// receives multiple write requests for the same object simultaneously, it
+// stores all versions of the objects. For each write request that is made
+// to the same object, Amazon S3 automatically generates a unique version
+// ID of that object being stored in Amazon S3. You can retrieve, replace,
+// or delete any version of the object. For more information about versioning,
+// see Adding Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html)
+// in the Amazon S3 User Guide. For information about returning the versioning
+// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html).
+// This functionality is not supported for directory buckets.
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your PutObject request includes specific headers.
+// s3:PutObject - To successfully complete the PutObject request, you must
+// always have the s3:PutObject permission on a bucket to add an object to
+// it. s3:PutObjectAcl - To successfully change the object's ACL of your PutObject
+// request, you must have the s3:PutObjectAcl permission. s3:PutObjectTagging
+// - To successfully set the tag-set with your PutObject request, you must
+// have the s3:PutObjectTagging permission.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// Data integrity with Content-MD5
+//
+// - General purpose bucket - To ensure that data is not corrupted traversing
+// the network, use the Content-MD5 header. When you use this header, Amazon
+// S3 checks the object against the provided MD5 value and, if they do not
+// match, Amazon S3 returns an error. Alternatively, when the object's ETag
+// is its MD5 digest, you can calculate the MD5 while putting the object
+// to Amazon S3 and compare the returned ETag to the calculated MD5 value.
+//
+// - Directory bucket - This functionality is not supported for directory
+// buckets.
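As a rough sketch of the Content-MD5 round trip described above for general purpose buckets (not part of this diff; the bucket and key are placeholders, and the ETag-equals-MD5 comparison only holds for a non-multipart upload where the ETag is the MD5 digest, as noted above):

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	svc := s3.New(sess)

	body := []byte("hello, world")
	sum := md5.Sum(body)

	out, err := svc.PutObject(&s3.PutObjectInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"), // placeholder
		Key:    aws.String("greeting.txt"),       // placeholder
		Body:   bytes.NewReader(body),
		// Amazon S3 verifies the payload against this MD5 and rejects a mismatch.
		ContentMD5: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	if err != nil {
		log.Fatalf("PutObject failed: %v", err)
	}

	// For a single-part PUT (no SSE-KMS/SSE-C), the ETag is the hex MD5 digest.
	etag := strings.Trim(aws.StringValue(out.ETag), `"`)
	fmt.Println("etag matches md5:", etag == hex.EncodeToString(sum[:]))
}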
+// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // For more information about related Amazon S3 APIs, see the following: // @@ -9690,13 +10517,15 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // PutObjectAcl API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Uses the acl subresource to set the access control list (ACL) permissions -// for a new or existing object in an S3 bucket. You must have WRITE_ACP permission -// to set the ACL of an object. For more information, see What permissions can -// I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// for a new or existing object in an S3 bucket. You must have the WRITE_ACP +// permission to set the ACL of an object. For more information, see What permissions +// can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) // in the Amazon S3 User Guide. // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // Depending on your application needs, you can choose to set the ACL on an // object using either the request body or the headers. For example, if you @@ -9865,10 +10694,12 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req // PutObjectLegalHold API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Applies a legal hold configuration to the specified object. For more information, // see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9945,6 +10776,8 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration // PutObjectLockConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Places an Object Lock configuration on the specified bucket. The rule specified // in the Object Lock configuration will be applied by default to every new // object placed in the specified bucket. For more information, see Locking @@ -9955,8 +10788,8 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration // - The DefaultRetention period can be either Days or Years but you must // select one. You cannot specify Days and Years at the same time. // -// - You can only enable Object Lock for new buckets. If you want to turn -// on Object Lock for an existing bucket, contact Amazon Web Services Support. +// - You can enable Object Lock for new or existing buckets. For more information, +// see Configuring Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10033,13 +10866,15 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req // PutObjectRetention API operation for Amazon Simple Storage Service. 
//
+// This operation is not supported by directory buckets.
+//
// Places an Object Retention configuration on an object. For more information,
// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
// Users or accounts require the s3:PutObjectRetention permission in order to
// place an Object Retention configuration on objects. Bypassing a Governance
// Retention configuration requires the s3:BypassGovernanceRetention permission.
//
-// This action is not supported by Amazon S3 on Outposts.
+// This functionality is not supported for Amazon S3 on Outposts.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -10116,12 +10951,15 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request
// PutObjectTagging API operation for Amazon Simple Storage Service.
//
-// Sets the supplied tag-set to an object that already exists in a bucket.
+// This operation is not supported by directory buckets.
//
-// A tag is a key-value pair. You can associate tags with an object by sending
-// a PUT request against the tagging subresource that is associated with the
-// object. You can retrieve tags by sending a GET request. For more information,
-// see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html).
+// Sets the supplied tag-set to an object that already exists in a bucket. A
+// tag is a key-value pair. For more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html).
+//
+// You can associate tags with an object by sending a PUT request against the
+// tagging subresource that is associated with the object. You can retrieve
+// tags by sending a GET request. For more information, see GetObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html).
//
// For tagging-related restrictions related to characters and encodings, see
// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html).
@@ -10134,22 +10972,20 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request
// To put tags of any other version, use the versionId query parameter. You
// also need permission for the s3:PutObjectVersionTagging action.
//
-// For information about the Amazon S3 object tagging feature, see Object Tagging
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
-//
-// PutObjectTagging has the following special errors:
+// PutObjectTagging has the following special errors. For more Amazon S3 errors,
+// see Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html).
//
-// - Code: InvalidTagError Cause: The tag provided was not a valid tag. This
-// error can occur if the tag did not pass input validation. For more information,
-// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+// - InvalidTag - The tag provided was not a valid tag. This error can occur
+// if the tag did not pass input validation. For more information, see Object
+// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html).
//
-// - Code: MalformedXMLError Cause: The XML provided does not match the schema.
+// - MalformedXML - The XML provided does not match the schema.
// -// - Code: OperationAbortedError Cause: A conflicting conditional action -// is currently in progress against this resource. Please try again. +// - OperationAborted - A conflicting conditional action is currently in +// progress against this resource. Please try again. // -// - Code: InternalError Cause: The service was unable to apply the provided -// tag to the object. +// - InternalError - The service was unable to apply the provided tag to +// the object. // // The following operations are related to PutObjectTagging: // @@ -10233,6 +11069,8 @@ func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req // PutPublicAccessBlock API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Creates or modifies the PublicAccessBlock configuration for an Amazon S3 // bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock // permission. For more information about Amazon S3 permissions, see Specifying @@ -10329,9 +11167,11 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // RestoreObject API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // # Restores an archived copy of an object back into Amazon S3 // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // This action performs the following types of requests: // @@ -10591,6 +11431,8 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // SelectObjectContent API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // This action filters the contents of an Amazon S3 object based on a simple // structured query language (SQL) statement. In the request, along with the // SQL expression, you must also specify a data serialization format (JSON, @@ -10599,7 +11441,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // SQL expression. You must also specify the data serialization format for the // response. // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // For more information about Amazon S3 Select, see Selecting Content from Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) @@ -10608,7 +11450,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // // # Permissions // -// You must have s3:GetObject permission for this operation. Amazon S3 Select +// You must have the s3:GetObject permission for this operation. Amazon S3 Select // does not support anonymous access. For more information about permissions, // see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) // in the Amazon S3 User Guide. @@ -10921,15 +11763,15 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // Uploads a part in a multipart upload. // -// In this operation, you provide part data in your request. However, you have -// an option to specify your existing Amazon S3 object as a data source for -// the part you are uploading. 
To upload a part from an existing object, you -// use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// In this operation, you provide new data as a part of an object in your request. +// However, you have an option to specify your existing Amazon S3 object as +// a data source for the part you are uploading. To upload a part from an existing +// object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // operation. // // You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) // before you can upload any part. In response to your initiate request, Amazon -// S3 returns an upload ID, a unique identifier, that you must include in your +// S3 returns an upload ID, a unique identifier that you must include in your // upload part request. // // Part numbers can be any number from 1 to 10,000, inclusive. A part number @@ -10941,18 +11783,8 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) // in the Amazon S3 User Guide. // -// To ensure that data is not corrupted when traversing the network, specify -// the Content-MD5 header in the upload part request. Amazon S3 checks the part -// data against the provided MD5 value. If they do not match, Amazon S3 returns -// an error. -// -// If the upload request is signed with Signature Version 4, then Amazon Web -// Services S3 uses the x-amz-content-sha256 header as a checksum instead of -// Content-MD5. For more information see Authenticating Requests: Using the -// Authorization Header (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). -// -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged +// After you initiate multipart upload and upload one or more parts, you must +// either complete or abort multipart upload in order to stop getting charged // for storage of the uploaded parts. Only after you either complete or abort // multipart upload, Amazon S3 frees up the parts storage and stops charging // you for the parts storage. @@ -10961,50 +11793,88 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the // Amazon S3 User Guide . // -// For information on the permissions required to use the multipart upload API, -// go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. -// -// Server-side encryption is for data encryption at rest. Amazon S3 encrypts -// your data as it writes it to disks in its data centers and decrypts it when -// you access it. You have three mutually exclusive options to protect data -// using server-side encryption in Amazon S3, depending on how you choose to -// manage the encryption keys. Specifically, the encryption key options are -// Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), -// and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side -// encryption using Amazon S3 managed keys (SSE-S3) by default. 
You can optionally
-// tell Amazon S3 to encrypt data at rest using server-side encryption with
-// other key options. The option you use depends on whether you want to use
-// KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). If you choose
-// to provide your own encryption key, the request headers you provide in the
-// request must match the headers you used in the request to initiate the upload
-// by using CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
-// For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide.
//
-// Server-side encryption is supported by the S3 Multipart Upload actions. Unless
-// you are using a customer-provided encryption key (SSE-C), you don't need
-// to specify the encryption parameters in each UploadPart request. Instead,
-// you only need to specify the server-side encryption parameters in the initial
-// Initiate Multipart request. For more information, see CreateMultipartUpload
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// Permissions
//
-// If you requested server-side encryption using a customer-provided encryption
-// key (SSE-C) in your initiate multipart upload request, you must provide identical
-// encryption information in each part upload using the following headers.
+// - General purpose bucket permissions - For information on the permissions
+// required to use the multipart upload API, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
//
-// - x-amz-server-side-encryption-customer-algorithm
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # Data integrity
+//
+// General purpose bucket - To ensure that data is not corrupted traversing
+// the network, specify the Content-MD5 header in the upload part request. Amazon
+// S3 checks the part data against the provided MD5 value. If they do not match,
+// Amazon S3 returns an error.
If the upload request is signed with Signature
+// Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header
+// as a checksum instead of Content-MD5. For more information, see Authenticating
+// Requests: Using the Authorization Header (Amazon Web Services Signature Version
+// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html).
+//
+// Directory buckets - MD5 is not supported by directory buckets. You can use
+// checksum algorithms to check object integrity.
+//
+// Encryption
+//
+// - General purpose bucket - Server-side encryption is for data encryption
+// at rest. Amazon S3 encrypts your data as it writes it to disks in its
+// data centers and decrypts it when you access it. You have mutually exclusive
+// options to protect data using server-side encryption in Amazon S3, depending
+// on how you choose to manage the encryption keys. Specifically, the encryption
+// key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS
+// keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts
+// data with server-side encryption using Amazon S3 managed keys (SSE-S3)
+// by default. You can optionally tell Amazon S3 to encrypt data at rest
+// using server-side encryption with other key options. The option you use
+// depends on whether you want to use KMS keys (SSE-KMS) or provide your
+// own encryption key (SSE-C). Server-side encryption is supported by the
+// S3 Multipart Upload operations. Unless you are using a customer-provided
+// encryption key (SSE-C), you don't need to specify the encryption parameters
+// in each UploadPart request. Instead, you only need to specify the server-side
+// encryption parameters in the initial Initiate Multipart request. For more
+// information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// If you request server-side encryption using a customer-provided encryption
+// key (SSE-C) in your initiate multipart upload request, you must provide
+// identical encryption information in each part upload using the following
+// request headers: x-amz-server-side-encryption-customer-algorithm, x-amz-server-side-encryption-customer-key,
+// and x-amz-server-side-encryption-customer-key-MD5.
+//
+// - Directory bucket - For directory buckets, only server-side encryption
+// with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+//
+// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+// in the Amazon S3 User Guide.
//
-// - x-amz-server-side-encryption-customer-key
+// Special errors
//
-// - x-amz-server-side-encryption-customer-key-MD5
+// - Error Code: NoSuchUpload Description: The specified multipart upload
+// does not exist. The upload ID might be invalid, or the multipart upload
+// might have been aborted or completed. HTTP Status Code: 404 Not Found
+// SOAP Fault Code Prefix: Client
//
-// UploadPart has the following special errors:
+// # HTTP Host header syntax
//
-// - Code: NoSuchUpload Cause: The specified multipart upload does not exist.
-// The upload ID might be invalid, or the multipart upload might have been
-// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code
-// Prefix: Client
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
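For context, the initiate/upload-part/complete sequence and the NoSuchUpload error above look roughly like this with this SDK (a sketch, not part of this diff; the bucket and key are placeholders and the single 5 MiB part is dummy data):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	svc := s3.New(sess)

	bucket, key := aws.String("DOC-EXAMPLE-BUCKET"), aws.String("big-object") // placeholders

	// 1. Initiate the upload; the returned upload ID ties the parts together.
	mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{Bucket: bucket, Key: key})
	if err != nil {
		log.Fatalf("CreateMultipartUpload: %v", err)
	}

	// 2. Upload part 1. Every part except the last must be at least 5 MiB.
	part, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   mpu.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader(make([]byte, 5*1024*1024)),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchUpload {
			log.Fatal("upload ID is invalid, or the upload was aborted or completed")
		}
		log.Fatalf("UploadPart: %v", err)
	}

	// 3. Complete (or abort) so Amazon S3 stops charging for the stored parts.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: mpu.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
		},
	})
	if err != nil {
		log.Fatalf("CompleteMultipartUpload: %v", err)
	}
	fmt.Println("multipart upload completed")
}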
// // The following operations are related to UploadPart: // @@ -11089,81 +11959,105 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // UploadPartCopy API operation for Amazon Simple Storage Service. // -// Uploads a part by copying data from an existing object as data source. You -// specify the data source by adding the request header x-amz-copy-source in -// your request and a byte range by adding the request header x-amz-copy-source-range +// Uploads a part by copying data from an existing object as data source. To +// specify the data source, you add the request header x-amz-copy-source in +// your request. To specify a byte range, you add the request header x-amz-copy-source-range // in your request. // // For information about maximum and minimum part sizes and other multipart // upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) // in the Amazon S3 User Guide. // -// Instead of using an existing object as part data, you might use the UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action -// and provide data in your request. +// Instead of copying data from an existing object as part data, you might use +// the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// action to upload new data as a part of an object in your request. // // You must initiate a multipart upload before you can upload any part. In response -// to your initiate request. Amazon S3 returns a unique identifier, the upload -// ID, that you must include in your upload part request. +// to your initiate request, Amazon S3 returns the upload ID, a unique identifier +// that you must include in your upload part request. +// +// For conceptual information about multipart uploads, see Uploading Objects +// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. For information about copying objects using +// a single atomic action vs. a multipart upload, see Operations on Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in +// the Amazon S3 User Guide. // -// For more information about using the UploadPartCopy operation, see the following: +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// - For conceptual information about multipart uploads, see Uploading Objects -// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. +// # Authentication and authorization // -// - For information about permissions required to use the multipart upload -// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. +// All UploadPartCopy requests must be authenticated and signed by using IAM +// credentials (access key ID and secret access key for the IAM identities). +// All headers with the x-amz- prefix, including x-amz-copy-source, must be +// signed. 
For more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
//
-// - For information about copying objects using a single atomic action vs.
-// a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html)
-// in the Amazon S3 User Guide.
+// Directory buckets - You must use IAM credentials to authenticate and authorize
+// your access to the UploadPartCopy API operation, instead of using the temporary
+// security credentials through the CreateSession API operation.
//
-// - For information about using server-side encryption with customer-provided
-// encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
-// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html).
+// The Amazon Web Services CLI and SDKs handle authentication and authorization
+// on your behalf.
//
-// Note the following additional considerations about the request headers x-amz-copy-source-if-match,
-// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and
-// x-amz-copy-source-if-modified-since:
+// # Permissions
//
-// - Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
-// headers are present in the request as follows: x-amz-copy-source-if-match
-// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since
-// condition evaluates to false; Amazon S3 returns 200 OK and copies the
-// data.
+// You must have READ access to the source object and WRITE access to the destination
+// bucket.
//
-// - Consideration 2 - If both of the x-amz-copy-source-if-none-match and
-// x-amz-copy-source-if-modified-since headers are present in the request
-// as follows: x-amz-copy-source-if-none-match condition evaluates to false,
-// and; x-amz-copy-source-if-modified-since condition evaluates to true;
-// Amazon S3 returns 412 Precondition Failed response code.
+// - General purpose bucket permissions - You must have the permissions in
+// a policy based on the bucket types of your source bucket and destination
+// bucket in an UploadPartCopy operation. If the source object is in a general
+// purpose bucket, you must have the s3:GetObject permission to read the
+// source object that is being copied. If the destination bucket is a general
+// purpose bucket, you must have the s3:PutObject permission to write the
+// object copy to the destination bucket. For information about permissions
+// required to use the multipart upload API, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
//
-// # Versioning
+// - Directory bucket permissions - You must have permissions in a bucket
+// policy or an IAM identity-based policy based on the source and destination
+// bucket types in an UploadPartCopy operation. If the source object that
+// you want to copy is in a directory bucket, you must have the s3express:CreateSession
+// permission in the Action element of a policy to read the object. By default,
+// the session is in the ReadWrite mode. If you want to restrict the access,
+// you can explicitly set the s3express:SessionMode condition key to ReadOnly
+// on the copy source bucket.
If the copy destination is a directory bucket, +// you must have the s3express:CreateSession permission in the Action element +// of a policy to write the object to the destination. The s3express:SessionMode +// condition key cannot be set to ReadOnly on the copy destination. For example +// policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. // -// If your bucket has versioning enabled, you could have multiple versions of -// the same object. By default, x-amz-copy-source identifies the current version -// of the object to copy. If the current version is a delete marker and you -// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 -// error, because the object does not exist. If you specify versionId in the -// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns -// an HTTP 400 error, because you are not allowed to specify a delete marker -// as a version for the x-amz-copy-source. +// Encryption // -// You can optionally specify a specific version of the source object to copy -// by adding the versionId subresource as shown in the following example: +// - General purpose buckets - For information about using server-side encryption +// with customer-provided encryption keys with the UploadPartCopy operation, +// see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). // -// x-amz-copy-source: /bucket/object?versionId=version id +// - Directory buckets - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. // // Special errors // -// - Code: NoSuchUpload Cause: The specified multipart upload does not exist. -// The upload ID might be invalid, or the multipart upload might have been -// aborted or completed. HTTP Status Code: 404 Not Found +// - Error Code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. HTTP Status Code: 404 Not Found +// +// - Error Code: InvalidRequest Description: The specified copy source is +// not supported as a byte-range copy source. HTTP Status Code: 400 Bad Request +// +// # HTTP Host header syntax // -// - Code: InvalidRequest Cause: The specified copy source is not supported -// as a byte-range copy source. HTTP Status Code: 400 Bad Request +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following operations are related to UploadPartCopy: // @@ -11256,6 +12150,8 @@ func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) ( // WriteGetObjectResponse API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Passes transformed objects to a GetObject operation when using Object Lambda // access points. 
For information about Object Lambda access points, see Transforming // objects with Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) @@ -11370,27 +12266,41 @@ type AbortMultipartUploadInput struct { // The bucket name to which the upload was taking place. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the
+	// HTTP status code 403 Forbidden (access denied).
	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`

	// Key of the object for which the multipart upload was initiated.
@@ -11399,10 +12309,14 @@ type AbortMultipartUploadInput struct {
	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

	// Confirms that the requester knows that they will be charged for the request.
-	// Bucket owners need not specify this parameter in their requests. For information
-	// about downloading objects from Requester Pays buckets, see Downloading Objects
+	// Bucket owners need not specify this parameter in their requests. If either
+	// the source or destination S3 bucket has Requester Pays enabled, the requester
+	// will pay for corresponding charges to copy the object. For information about
+	// downloading objects from Requester Pays buckets, see Downloading Objects
	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
	// in the Amazon S3 User Guide.
+	//
+	// This functionality is not supported for directory buckets.
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

	// Upload ID that identifies the multipart upload.
@@ -11523,6 +12437,8 @@ type AbortMultipartUploadOutput struct {
	// If present, indicates that the requester was successfully charged for the
	// request.
+	//
+	// This functionality is not supported for directory buckets.
	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
}

@@ -12057,9 +12973,7 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes
	return s
}

-// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name
-// is globally unique, and the namespace is shared by all Amazon Web Services
-// accounts.
+// In terms of implementation, a Bucket is a resource.
type Bucket struct {
	_ struct{} `type:"structure"`

@@ -12101,6 +13015,51 @@ func (s *Bucket) SetName(v string) *Bucket {
	return s
}

+// Specifies the information about the bucket that will be created. For more
+// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
+// in the Amazon S3 User Guide.
+//
+// This functionality is only supported by directory buckets.
+type BucketInfo struct {
+	_ struct{} `type:"structure"`
+
+	// The number of Availability Zones used for redundancy for the bucket.
+	DataRedundancy *string `type:"string" enum:"DataRedundancy"`
+
+	// The type of bucket.
+	Type *string `type:"string" enum:"BucketType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketInfo) GoString() string { + return s.String() +} + +// SetDataRedundancy sets the DataRedundancy field's value. +func (s *BucketInfo) SetDataRedundancy(v string) *BucketInfo { + s.DataRedundancy = &v + return s +} + +// SetType sets the Type field's value. +func (s *BucketInfo) SetType(v string) *BucketInfo { + s.Type = &v + return s +} + // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) // in the Amazon S3 User Guide. @@ -12572,34 +13531,42 @@ type Checksum struct { _ struct{} `type:"structure"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. 
For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `type:"string"` } @@ -12759,19 +13726,33 @@ type CompleteMultipartUploadInput struct { // Name of the bucket to which the multipart upload was initiated. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? 
(https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -12805,9 +13786,9 @@ type CompleteMultipartUploadInput struct { // in the Amazon S3 User Guide. ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which the multipart upload was initiated. @@ -12819,16 +13800,23 @@ type CompleteMultipartUploadInput struct { MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The server-side encryption (SSE) algorithm used to encrypt the object. This - // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // parameter is required only when the object was created using a checksum algorithm + // or if your bucket policy requires the use of SSE-C. For more information, + // see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. 
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // The server-side encryption (SSE) customer managed key. This parameter is @@ -12836,6 +13824,8 @@ type CompleteMultipartUploadInput struct { // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CompleteMultipartUploadInput's // String and GoString methods. @@ -12845,6 +13835,8 @@ type CompleteMultipartUploadInput struct { // is needed only when the object was created using a checksum algorithm. For // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // ID for the initiated multipart upload. @@ -13021,55 +14013,52 @@ type CompleteMultipartUploadOutput struct { // The name of the bucket that contains the newly created object. Does not return // the access point ARN or access point alias if used. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // Access points are not supported by directory buckets. Bucket *string `type:"string"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. 
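A minimal sketch of completing a multipart upload with aws-sdk-go v1, tying the input fields above together (bucket, key, upload ID, and ETags are placeholders; per the CompletedPart rules later in this patch, part numbers start at 1 and are consecutive):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("amzn-s3-demo-bucket"),
		Key:      aws.String("reports/january.pdf"),
		UploadId: aws.String("EXAMPLE-UPLOAD-ID"),
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{
				// ETags come from the corresponding UploadPart responses.
				{ETag: aws.String(`"etag-of-part-1"`), PartNumber: aws.Int64(1)},
				{ETag: aws.String(`"etag-of-part-2"`), PartNumber: aws.Int64(2)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Location), aws.StringValue(out.ETag))
}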
When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. 
For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `type:"string"` @@ -13085,6 +14074,8 @@ type CompleteMultipartUploadOutput struct { // If the object expiration is configured, this will contain the expiration // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // The object key of the newly created object. @@ -13095,11 +14086,15 @@ type CompleteMultipartUploadOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // If present, specifies the ID of the Key Management Service (KMS) symmetric + // If present, indicates the ID of the Key Management Service (KMS) symmetric // encryption customer managed key that was used for the object. // + // This functionality is not supported for directory buckets. + // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CompleteMultipartUploadOutput's // String and GoString methods. @@ -13107,10 +14102,15 @@ type CompleteMultipartUploadOutput struct { // The server-side encryption algorithm used when storing this object in Amazon // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version ID of the newly created object, in case the bucket has versioning // turned on. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -13263,34 +14263,42 @@ type CompletedPart struct { _ struct{} `type:"structure"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. 
With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `type:"string"` @@ -13299,6 +14307,16 @@ type CompletedPart struct { // Part number that identifies the part. This is a positive integer between // 1 and 10,000. + // + // * General purpose buckets - In CompleteMultipartUpload, when an additional + // checksum (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, + // or x-amz-checksum-sha256) is applied to each part, the PartNumber must + // start at 1 and the part numbers must be consecutive. Otherwise, Amazon + // S3 generates an HTTP 400 Bad Request status code and an InvalidPartOrder + // error code.
+ // + // * Directory buckets - In CompleteMultipartUpload, the PartNumber must + // start at 1 and the part numbers must be consecutive. PartNumber *int64 `type:"integer"` } @@ -13458,26 +14476,60 @@ func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg even type CopyObjectInput struct { _ struct{} `locationName:"CopyObjectRequest" type:"structure"` - // The canned ACL to apply to the object. + // The canned access control list (ACL) to apply to the object. + // + // When you copy an object, the ACL metadata is not preserved and is set to + // private by default. Only the owner has full access control. To override the + // default ACL setting, specify a new ACL when you generate a copy request. + // For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). + // + // If the destination bucket that you're copying objects to uses the bucket + // owner enforced setting for S3 Object Ownership, ACLs are disabled and no + // longer affect permissions. Buckets that use this setting only accept PUT + // requests that don't specify an ACL or PUT requests that specify bucket owner + // full control ACLs, such as the bucket-owner-full-control canned ACL or an + // equivalent form of this ACL expressed in the XML format. For more information, + // see Controlling ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // * If your destination bucket uses the bucket owner enforced setting for + // Object Ownership, all objects written to the bucket by any account will + // be owned by the bucket owner. + // + // * This functionality is not supported for directory buckets. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for Amazon S3 on Outposts. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // The name of the destination bucket. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. 
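Reusing the svc client from the sketch above, a minimal CopyObject request against general purpose buckets looks like this (bucket and key names are placeholders; the canned ACL line only applies when the destination bucket has ACLs enabled):

_, err := svc.CopyObject(&s3.CopyObjectInput{
	Bucket:     aws.String("amzn-s3-demo-destination-bucket"),
	Key:        aws.String("reports/january-copy.pdf"),
	CopySource: aws.String("amzn-s3-demo-source-bucket/reports/january.pdf"),
	ACL:        aws.String(s3.ObjectCannedACLBucketOwnerFullControl),
})
if err != nil {
	log.Fatal(err)
}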
For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -13485,44 +14537,73 @@ type CopyObjectInput struct { // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the + // object. + // // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for - // object encryption with SSE-KMS. + // object encryption with SSE-KMS. Specifying this header with a COPY action + // doesn’t affect bucket-level settings for S3 Bucket Key. // - // Specifying this header with a COPY action doesn’t affect bucket-level settings - // for S3 Bucket Key. + // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - // Specifies caching behavior along the request/reply chain. + // Specifies the caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Indicates the algorithm you want Amazon S3 to use to create the checksum + // Indicates the algorithm that you want Amazon S3 to use to create the checksum // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. + // + // When you copy an object, if the source object has a checksum, that checksum + // value will be copied to the new object by default. If the CopyObject request + // does not include this x-amz-checksum-algorithm header, the checksum algorithm + // will be copied from the source object to the destination object (if it's + // present on the source object). 
You can optionally specify a different checksum + // algorithm to use with the x-amz-checksum-algorithm header. Unrecognized or + // unsupported values will respond with the HTTP status code 400 Bad Request. + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // Specifies presentational information for the object. + // Specifies presentational information for the object. Indicates whether an + // object should be displayed in a web browser or downloaded as a file. It allows + // specifying the desired filename for the downloaded file. ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced // by the Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` // The language the content is in. ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - // A standard MIME type describing the format of the object data. + // A standard MIME type that describes the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // Specifies the source object for the copy operation. You specify the value - // in one of two formats, depending on whether you want to access the source - // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): + // Specifies the source object for the copy operation. The source object can + // be up to 5 GB. If the source object is an object that was uploaded by using + // a multipart upload, the object copy will be a single part object after the + // source object is copied to the destination bucket. + // + // You specify the value of the copy source in one of two formats, depending + // on whether you want to access the source object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): // // * For objects not accessed through an access point, specify the name of // the source bucket and the key of the source object, separated by a slash - // (/). For example, to copy the object reports/january.pdf from the bucket - // awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value - // must be URL-encoded. + // (/). For example, to copy the object reports/january.pdf from the general + // purpose bucket awsexamplebucket, use awsexamplebucket/reports/january.pdf. + // The value must be URL-encoded. To copy the object reports/january.pdf + // from the directory bucket awsexamplebucket--use1-az5--x-s3, use awsexamplebucket--use1-az5--x-s3/reports/january.pdf. + // The value must be URL-encoded. // // * For objects accessed through access points, specify the Amazon Resource // Name (ARN) of the object as accessed through the access point, in the @@ -13531,43 +14612,104 @@ type CopyObjectInput struct { // my-access-point owned by account 123456789012 in Region us-west-2, use // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. // The value must be URL encoded. 
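Building the x-amz-copy-source value by hand, per the formats above (a sketch assuming "net/url" and "strings" imports; the helper name, paths, and version ID are placeholders, and url.PathEscape is applied per key segment so the "/" delimiters stay literal):

func copySourceFor(bucket, key, versionID string) string {
	segs := strings.Split(key, "/")
	for i, s := range segs {
		segs[i] = url.PathEscape(s)
	}
	src := bucket + "/" + strings.Join(segs, "/")
	if versionID != "" {
		// Pin a specific source version via the versionId query parameter.
		src += "?versionId=" + versionID
	}
	return src
}

// copySourceFor("amzn-s3-demo-source-bucket", "monthly reports/january report.pdf",
//	"QUpfdndhfd8438MNFDN93jdnJFkdmqnh893")
// => "amzn-s3-demo-source-bucket/monthly%20reports/january%20report.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"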
Amazon S3 supports copy operations using - // access points only when the source and destination buckets are in the - // same Amazon Web Services Region. Alternatively, for objects accessed through - // Amazon S3 on Outposts, specify the ARN of the object as accessed in the - // format arn:aws:s3-outposts:::outpost//object/. + // Access points only when the source and destination buckets are in the + // same Amazon Web Services Region. Access points are not supported by directory + // buckets. Alternatively, for objects accessed through Amazon S3 on Outposts, + // specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/. // For example, to copy the object reports/january.pdf through outpost my-outpost // owned by account 123456789012 in Region us-west-2, use the URL encoding // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. // The value must be URL-encoded. // - // To copy a specific version of an object, append ?versionId= to - // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). + // If your source bucket versioning is enabled, the x-amz-copy-source header + // by default identifies the current version of an object to copy. If the current + // version is a delete marker, Amazon S3 behaves as if the object was deleted. + // To copy a different version, use the versionId query parameter. Specifically, + // append ?versionId= to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). // If you don't specify a version ID, Amazon S3 copies the latest version of // the source object. // + // If you enable versioning on the destination bucket, Amazon S3 generates a + // unique version ID for the copied object. This version ID is different from + // the version ID of the source object. Amazon S3 returns the version ID of + // the copied object in the x-amz-version-id response header in the response. + // + // If you do not enable versioning or suspend it on the destination bucket, + // the version ID that Amazon S3 generates in the x-amz-version-id response + // header is always null. + // + // Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. + // // CopySource is a required field CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` // Copies the object if its entity tag (ETag) matches the specified tag. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // + // * x-amz-copy-source-if-match condition evaluates to true + // + // * x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` // Copies the object if it has been modified since the specified time. 
+ // + // If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // the 412 Precondition Failed response code: + // + // * x-amz-copy-source-if-none-match condition evaluates to false + // + // * x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` // Copies the object if its entity tag (ETag) is different than the specified // ETag. + // + // If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // the 412 Precondition Failed response code: + // + // * x-amz-copy-source-if-none-match condition evaluates to false + // + // * x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` // Copies the object if it hasn't been modified since the specified time. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // + // * x-amz-copy-source-if-match condition evaluates to true + // + // * x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` // Specifies the algorithm to use when decrypting the source object (for example, // AES256). + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you + // must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one - // that was used when the source object was created. + // the source object. The encryption key provided in this header must be the + // same one that was used when the source object was created. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you + // must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. // // CopySourceSSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectInput's @@ -13577,16 +14719,23 @@ type CopyObjectInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you + // must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. 
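A conditional copy under the x-amz-copy-source-if-* semantics above (a sketch reusing svc; the ETag and date are placeholders): when CopySourceIfMatch evaluates to true and CopySourceIfUnmodifiedSince evaluates to false, Amazon S3 returns 200 OK and copies the data.

_, err := svc.CopyObject(&s3.CopyObjectInput{
	Bucket:                      aws.String("amzn-s3-demo-destination-bucket"),
	Key:                         aws.String("reports/january-copy.pdf"),
	CopySource:                  aws.String("amzn-s3-demo-source-bucket/reports/january.pdf"),
	CopySourceIfMatch:           aws.String(`"9b2cf535f27731c974343645a3985328"`),
	CopySourceIfUnmodifiedSince: aws.Time(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)),
})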
+ // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - // The account ID of the expected destination bucket owner. If the destination - // bucket is owned by a different account, the request fails with the HTTP status - // code 403 Forbidden (access denied). + // The account ID of the expected destination bucket owner. If the account ID + // that you provide does not match the actual owner of the destination bucket, + // the request fails with the HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The account ID of the expected source bucket owner. If the source bucket - // is owned by a different account, the request fails with the HTTP status code - // 403 Forbidden (access denied). + // The account ID of the expected source bucket owner. If the account ID that + // you provide does not match the actual owner of the source bucket, the request + // fails with the HTTP status code 403 Forbidden (access denied). ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` // The date and time at which the object is no longer cacheable. @@ -13594,22 +14743,30 @@ type CopyObjectInput struct { // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to read the object data and its metadata. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the object ACL. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to write the ACL for the applicable object. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // The key of the destination object. @@ -13621,35 +14778,69 @@ type CopyObjectInput struct { Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // Specifies whether the metadata is copied from the source object or replaced - // with metadata provided in the request. + // with metadata that's provided in the request. When copying an object, you + // can preserve all metadata (the default) or specify new metadata. If this + // header isn’t specified, COPY is the default behavior. 
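Replacing metadata during the copy, per the x-amz-metadata-directive behavior above (a sketch reusing svc; the metadata key and values are placeholders). With the default COPY directive, the Metadata map below would be ignored and the source metadata preserved:

_, err := svc.CopyObject(&s3.CopyObjectInput{
	Bucket:            aws.String("amzn-s3-demo-destination-bucket"),
	Key:               aws.String("reports/january-copy.pdf"),
	CopySource:        aws.String("amzn-s3-demo-source-bucket/reports/january.pdf"),
	MetadataDirective: aws.String(s3.MetadataDirectiveReplace),
	Metadata: map[string]*string{
		"reviewed-by": aws.String("data-team"),
	},
	ContentType: aws.String("application/pdf"),
})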
+ // + // General purpose bucket - For general purpose buckets, when you grant permissions, + // you can use the s3:x-amz-metadata-directive condition key to enforce certain + // metadata behavior when objects are uploaded. For more information, see Amazon + // S3 condition key examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) + // in the Amazon S3 User Guide. + // + // x-amz-website-redirect-location is unique to each object and is not copied + // when using the x-amz-metadata-directive header. To copy the value, you must + // specify x-amz-website-redirect-location in the request header. MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` - // Specifies whether you want to apply a legal hold to the copied object. + // Specifies whether you want to apply a legal hold to the object copy. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The Object Lock mode that you want to apply to the copied object. + // The Object Lock mode that you want to apply to the object copy. + // + // This functionality is not supported for directory buckets. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when you want the copied object's Object Lock to expire. + // The date and time when you want the Object Lock of the object copy to expire. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting + // in your request is different from the default encryption configuration of + // the destination bucket, the encryption setting in your request takes precedence. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. 
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon + // data. This value is used to store the object and then it is discarded. Amazon // S3 does not store the encryption key. The key must be appropriate for use // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectInput's // String and GoString methods. @@ -13658,55 +14849,201 @@ type CopyObjectInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. + // JSON with the encryption context key-value pairs. This value must be explicitly + // added to specify encryption context for CopyObject requests. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectInput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the KMS key ID to use for object encryption. All GET and PUT requests - // for an object protected by KMS will fail if they're not made via SSL or using - // SigV4. For information about configuring any of the officially supported - // Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the - // Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. + // All GET and PUT requests for an object protected by KMS will fail if they're + // not made via SSL or using SigV4. For information about configuring any of + // the officially supported Amazon Web Services SDKs and Amazon Web Services + // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectInput's // String and GoString methods. 
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // S3 (for example, AES256, aws:kms, aws:kms:dsse). Unrecognized or unsupported + // values won’t write a destination object and will receive a 400 Bad Request + // response. + // + // Amazon S3 automatically encrypts all new objects that are copied to an S3 + // bucket. When copying an object, if you don't specify encryption information + // in your copy request, the encryption setting of the target object is set + // to the default encryption configuration of the destination bucket. By default, + // all buckets have a base level of encryption configuration that uses server-side + // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket + // has a default encryption configuration that uses server-side encryption with + // Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption + // with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with + // customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding + // KMS key, or a customer-provided key to encrypt the target object copy. + // + // When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting + // in your request is different from the default encryption configuration of + // the destination bucket, the encryption setting in your request takes precedence. + // + // With server-side encryption, Amazon S3 encrypts your data as it writes your + // data to disks in its data centers and decrypts the data when you access it. + // For more information about server-side encryption, see Using Server-Side + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) + // in the Amazon S3 User Guide. + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // By default, Amazon S3 uses the STANDARD Storage Class to store newly created - // objects. The STANDARD storage class provides high durability and high availability. - // Depending on performance needs, you can specify a different Storage Class. - // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, - // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // If the x-amz-storage-class header is not used, the copied object will be + // stored in the STANDARD Storage Class by default. The STANDARD storage class + // provides high durability and high availability. Depending on performance + // needs, you can specify a different Storage Class. + // + // * Directory buckets - For directory buckets, only the S3 Express One Zone + // storage class is supported to store newly created objects. Unsupported + // storage class values won't write a destination object and will respond + // with the HTTP status code 400 Bad Request. 
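Overriding the destination bucket's default encryption on copy, per the server-side encryption discussion above (a sketch reusing svc; the KMS key ARN is a placeholder, and this combination is not valid for directory buckets):

_, err := svc.CopyObject(&s3.CopyObjectInput{
	Bucket:               aws.String("amzn-s3-demo-destination-bucket"),
	Key:                  aws.String("reports/january-copy.pdf"),
	CopySource:           aws.String("amzn-s3-demo-source-bucket/reports/january.pdf"),
	ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
	SSEKMSKeyId:          aws.String("arn:aws:kms:us-west-2:123456789012:key/EXAMPLE-KEY-ID"),
	BucketKeyEnabled:     aws.Bool(true),
})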
+ // + // * Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage + // Class. + // + // You can use the CopyObject action to change the storage class of an object + // that is already stored in Amazon S3 by using the x-amz-storage-class header. + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. + // + // Before using an object as a source object for the copy operation, you must + // restore a copy of it if it meets any of the following conditions: + // + // * The storage class of the source object is GLACIER or DEEP_ARCHIVE. + // + // * The storage class of the source object is INTELLIGENT_TIERING and its + // S3 Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition) + // is Archive Access or Deep Archive Access. + // + // For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) + // and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html) // in the Amazon S3 User Guide. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // The tag-set for the object destination object this value must be used in - // conjunction with the TaggingDirective. The tag-set must be encoded as URL - // Query parameters. + // The tag-set for the object copy in the destination bucket. This value must + // be used in conjunction with the x-amz-tagging-directive if you choose REPLACE + // for the x-amz-tagging-directive. If you choose COPY for the x-amz-tagging-directive, + // you don't need to set the x-amz-tagging header, because the tag-set will + // be copied from the source object directly. The tag-set must be encoded as + // URL Query parameters. + // + // The default value is the empty value. + // + // Directory buckets - For directory buckets in a CopyObject operation, only + // the empty tag-set is supported. Any requests that attempt to write non-empty + // tags into directory buckets will receive a 501 Not Implemented status code. + // When the destination bucket is a directory bucket, you will receive a 501 + // Not Implemented response in any of the following situations: + // + // * When you attempt to COPY the tag-set from an S3 source object that has + // non-empty tags. + // + // * When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging. + // + // * When you don't set the x-amz-tagging-directive header and the source + // object has non-empty tags. This is because the default value of x-amz-tagging-directive + // is COPY. + // + // Because only the empty tag-set is supported for directory buckets in a CopyObject + // operation, the following situations are allowed: + // + // * When you attempt to COPY the tag-set from a directory bucket source + // object that has no tags to a general purpose bucket. It copies an empty + // tag-set to the destination object. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and set the x-amz-tagging value of the directory bucket destination + // object to empty. + // + // * When you attempt to REPLACE the tag-set of a general purpose bucket + // source object that has non-empty tags and set the x-amz-tagging value + // of the directory bucket destination object to empty.
+ // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty + // value. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` - // Specifies whether the object tag-set are copied from the source object or - // replaced with tag-set provided in the request. + // Specifies whether the object tag-set is copied from the source object or + // replaced with the tag-set that's provided in the request. + // + // The default value is COPY. + // + // Directory buckets - For directory buckets in a CopyObject operation, only + // the empty tag-set is supported. Any requests that attempt to write non-empty + // tags into directory buckets will receive a 501 Not Implemented status code. + // When the destination bucket is a directory bucket, you will receive a 501 + // Not Implemented response in any of the following situations: + // + // * When you attempt to COPY the tag-set from an S3 source object that has + // non-empty tags. + // + // * When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging. + // + // * When you don't set the x-amz-tagging-directive header and the source + // object has non-empty tags. This is because the default value of x-amz-tagging-directive + // is COPY. + // + // Because only the empty tag-set is supported for directory buckets in a CopyObject + // operation, the following situations are allowed: + // + // * When you attempt to COPY the tag-set from a directory bucket source + // object that has no tags to a general purpose bucket. It copies an empty + // tag-set to the destination object. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and set the x-amz-tagging value of the directory bucket destination + // object to empty. + // + // * When you attempt to REPLACE the tag-set of a general purpose bucket + // source object that has non-empty tags and set the x-amz-tagging value + // of the directory bucket destination object to empty. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty + // value. TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"` - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. This value is unique to - // each object and is not copied when using the x-amz-metadata-directive header. - // Instead, you may opt to provide this header in combination with the directive. + // If the destination bucket is configured as a website, redirects requests + // for this object copy to another object in the same bucket or to an external + // URL. Amazon S3 stores the value of this header in the object metadata. This + // value is unique to each object and is not copied when using the x-amz-metadata-directive + // header. Instead, you may opt to provide this header in combination with the + // x-amz-metadata-directive header. + // + // This functionality is not supported for directory buckets. 
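Replacing the tag-set on the object copy, per the x-amz-tagging and x-amz-tagging-directive rules above (a sketch reusing svc; the tag names are placeholders). With the default COPY directive the Tagging field is ignored and the source tags are carried over:

_, err := svc.CopyObject(&s3.CopyObjectInput{
	Bucket:           aws.String("amzn-s3-demo-destination-bucket"),
	Key:              aws.String("reports/january-copy.pdf"),
	CopySource:       aws.String("amzn-s3-demo-source-bucket/reports/january.pdf"),
	TaggingDirective: aws.String(s3.TaggingDirectiveReplace),
	Tagging:          aws.String("project=alpha&status=reviewed"), // URL query encoding
})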
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -14052,53 +15389,75 @@ type CopyObjectOutput struct { // Indicates whether the copied object uses an S3 Bucket Key for server-side // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Container for all response elements. CopyObjectResult *CopyObjectResult `type:"structure"` - // Version of the copied object in the destination bucket. + // Version ID of the source object that was copied. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` // If the object expiration is configured, the response includes this header. + // + // This functionality is not supported for directory buckets. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the Amazon Web Services KMS Encryption Context to use + // If present, indicates the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 // string holding JSON with the encryption context key-value pairs. // + // This functionality is not supported for directory buckets. + // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectOutput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the Key Management Service (KMS) symmetric + // If present, indicates the ID of the Key Management Service (KMS) symmetric // encryption customer managed key that was used for the object. // + // This functionality is not supported for directory buckets. + // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectOutput's // String and GoString methods. 
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon + // The server-side encryption algorithm used when you store this object in Amazon // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version ID of the newly created copy. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -14191,34 +15550,26 @@ type CopyObjectResult struct { _ struct{} `type:"structure"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. 
For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA256 *string `type:"string"` @@ -14289,34 +15640,42 @@ type CopyPartResult struct { _ struct{} `type:"structure"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. 
With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `type:"string"` @@ -14385,8 +15744,29 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { type CreateBucketConfiguration struct { _ struct{} `type:"structure"` - // Specifies the Region where the bucket will be created. If you don't specify - // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). + // Specifies the information about the bucket that will be created. + // + // This functionality is only supported by directory buckets. + Bucket *BucketInfo `type:"structure"` + + // Specifies the location where the bucket will be created. + // + // For directory buckets, the location type is Availability Zone. + // + // This functionality is only supported by directory buckets. + Location *LocationInfo `type:"structure"` + + // Specifies the Region where the bucket will be created. You might choose a + // Region to optimize latency, minimize costs, or address regulatory requirements. + // For example, if you reside in Europe, you will probably find it advantageous + // to create buckets in the Europe (Ireland) Region. For more information, see + // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + // in the Amazon S3 User Guide. + // + // If you don't specify a Region, the bucket is created in the US East (N. Virginia) + // Region (us-east-1) by default. + // + // This functionality is not supported for directory buckets. LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -14408,6 +15788,22 @@ func (s CreateBucketConfiguration) GoString() string { return s.String() } +// SetBucket sets the Bucket field's value. +func (s *CreateBucketConfiguration) SetBucket(v *BucketInfo) *CreateBucketConfiguration { + s.Bucket = v + return s +} + +func (s *CreateBucketConfiguration) getBucket() (v *BucketInfo) { + return s.Bucket +} + +// SetLocation sets the Location field's value. +func (s *CreateBucketConfiguration) SetLocation(v *LocationInfo) *CreateBucketConfiguration { + s.Location = v + return s +} + // SetLocationConstraint sets the LocationConstraint field's value. func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { s.LocationConstraint = &v @@ -14418,10 +15814,25 @@ type CreateBucketInput struct { _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` // The canned ACL to apply to the bucket. + // + // This functionality is not supported for directory buckets. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` // The name of the bucket to create. 
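// A hedged sketch of a directory-bucket configuration, assuming the
// BucketInfo/LocationInfo fields and enum constants introduced with this
// change (svc is an *s3.S3 client; the Availability Zone ID and bucket name
// are placeholders):
//
//	_, err := svc.CreateBucket(&s3.CreateBucketInput{
//		// The bucket name must embed the same AZ ID as the Location below.
//		Bucket: aws.String("DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"),
//		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
//			Bucket: &s3.BucketInfo{
//				DataRedundancy: aws.String(s3.DataRedundancySingleAvailabilityZone),
//				Type:           aws.String(s3.BucketTypeDirectory),
//			},
//			Location: &s3.LocationInfo{
//				Name: aws.String("usw2-az2"),
//				Type: aws.String(s3.LocationTypeAvailabilityZone),
//			},
//		},
//	})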
// + // General purpose buckets - For information about bucket naming restrictions, + // see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) + // in the Amazon S3 User Guide. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14430,24 +15841,36 @@ type CreateBucketInput struct { // Allows grantee the read, write, read ACP, and write ACP permissions on the // bucket. + // + // This functionality is not supported for directory buckets. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to list the objects in the bucket. + // + // This functionality is not supported for directory buckets. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the bucket ACL. + // + // This functionality is not supported for directory buckets. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to create new objects in the bucket. // // For the bucket and object owners of existing objects, also allows deletions // and overwrites of those objects. + // + // This functionality is not supported for directory buckets. GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for directory buckets. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // + // This functionality is not supported for directory buckets. ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` // The container element for object ownership for a bucket's ownership controls. @@ -14462,8 +15885,19 @@ type CreateBucketInput struct { // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that - // don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control - // canned ACL or an equivalent form of this ACL expressed in the XML format. + // don't specify an ACL or specify bucket owner full control ACLs (such as the + // predefined bucket-owner-full-control canned ACL or a custom ACL in XML format + // that grants the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. + // We recommend keeping ACLs disabled, except in uncommon use cases where you + // must control access for each object individually.
For more information about + // S3 Object Ownership, see Controlling ownership of objects and disabling ACLs + // for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. ObjectOwnership *string `location:"header" locationName:"x-amz-object-ownership" type:"string" enum:"ObjectOwnership"` } @@ -14602,26 +16036,54 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { type CreateMultipartUploadInput struct { _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. Amazon S3 supports a set of predefined + // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees + // and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can grant access permissions to individual + // Amazon Web Services accounts or to predefined groups defined by Amazon S3. + // These permissions are then added to the access control list (ACL) on the + // new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). + // One way to grant the permissions using the request headers is to specify + // a canned ACL with the x-amz-acl request header. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - // The name of the bucket to which to initiate the upload + // The name of the bucket where the multipart upload is initiated and where + // the object is uploaded. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az_id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -14634,12 +16096,14 @@ type CreateMultipartUploadInput struct { // // Specifying this header with an object action doesn’t affect bucket-level // settings for S3 Bucket Key. + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Indicates the algorithm you want Amazon S3 to use to create the checksum + // Indicates the algorithm that you want Amazon S3 to use to create the checksum // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` @@ -14650,40 +16114,175 @@ type CreateMultipartUploadInput struct { // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced // by the Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - // The language the content is in. + // The language that the content is in. ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The account ID of the expected bucket owner. 
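// For illustration, initiating a multipart upload uses the same input shape
// for general purpose and directory buckets; only the bucket name and
// endpoint style differ (a sketch; svc is an *s3.S3 client and the names are
// placeholders):
//
//	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
//		Bucket: aws.String("DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"),
//		Key:    aws.String("large-object"),
//	})
//	// out.UploadId is then passed to UploadPart and CompleteMultipartUpload.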
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The date and time at which the object is no longer cacheable. Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // Specify access permissions explicitly to give the grantee READ, READ_ACP, + // and WRITE_ACP permissions on the object. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account // - // This action is not supported by Amazon S3 on Outposts. + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - // Allows grantee to read the object data and its metadata. + // Specify access permissions explicitly to allow grantee to read the object + // data and its metadata. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. // - // This action is not supported by Amazon S3 on Outposts. 
+ // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account + // + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - // Allows grantee to read the object ACL. + // Specify access permissions explicitly to allow the grantee to read the object + // ACL. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account + // + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - // Allows grantee to write the ACL for the applicable object.
+ // Specify access permissions explicitly to allow the grantee to write + // the ACL for the applicable object. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account + // + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. // - // This action is not supported by Amazon S3 on Outposts. + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Object key for which the multipart upload is to be initiated. @@ -14695,23 +16294,34 @@ type CreateMultipartUploadInput struct { Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // Specifies whether you want to apply a legal hold to the uploaded object. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` // Specifies the Object Lock mode that you want to apply to the uploaded object. + // + // This functionality is not supported for directory buckets. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // Specifies the date and time when you want the Object Lock to expire. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests.
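// A sketch of the type=value grant syntax described above, reusing the
// documentation's placeholder account IDs (general purpose buckets only):
//
//	input := &s3.CreateMultipartUploadInput{
//		Bucket:    aws.String("DOC-EXAMPLE-BUCKET"),
//		Key:       aws.String("large-object"),
//		GrantRead: aws.String(`id="11112222333", id="444455556666"`),
//	}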
If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -14720,56 +16330,70 @@ type CreateMultipartUploadInput struct { // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateMultipartUploadInput's // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. Amazon S3 uses this header for a message integrity + // check to ensure that the encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding // JSON with the encryption context key-value pairs. // + // This functionality is not supported for directory buckets. + // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateMultipartUploadInput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the ID of the symmetric encryption customer managed key to use - // for object encryption. All GET and PUT requests for an object protected by - // KMS will fail if they're not made via SSL or using SigV4. For information - // about configuring any of the officially supported Amazon Web Services SDKs - // and Amazon Web Services CLI, see Specifying the Signature Version in Request - // Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 User Guide. + // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption + // customer managed key to use for object encryption. 
+ // + // This functionality is not supported for directory buckets. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateMultipartUploadInput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon + // The server-side encryption algorithm used when you store this object in Amazon // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high availability. // Depending on performance needs, you can specify a different Storage Class. - // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, - // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 User Guide. + // + // * For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // + // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // + // This functionality is not supported for directory buckets. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -15042,38 +16666,32 @@ type CreateMultipartUploadOutput struct { // name in the request, the response includes this header. The header indicates // when the initiated multipart upload becomes eligible for an abort operation. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon S3 User Guide. // // The response also includes the x-amz-abort-rule-id header that provides the - // ID of the lifecycle configuration rule that defines this action. + // ID of the lifecycle configuration rule that defines the abort action. + // + // This functionality is not supported for directory buckets. AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` // This header is returned along with the x-amz-abort-date header. 
It identifies // the applicable lifecycle configuration rule that defines the action to abort // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // The name of the bucket to which the multipart upload was initiated. Does // not return the access point ARN or access point alias if used. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // Access points are not supported by directory buckets. Bucket *string `locationName:"Bucket" type:"string"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // The algorithm that was used to create a checksum of the object. @@ -15084,37 +16702,50 @@ type CreateMultipartUploadOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. 
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the Amazon Web Services KMS Encryption Context to use + // If present, indicates the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 // string holding JSON with the encryption context key-value pairs. // + // This functionality is not supported for directory buckets. + // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the Key Management Service (KMS) symmetric + // If present, indicates the ID of the Key Management Service (KMS) symmetric // encryption customer managed key that was used for the object. // + // This functionality is not supported for directory buckets. + // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon + // The server-side encryption algorithm used when you store this object in Amazon // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // ID for the initiated multipart upload. @@ -15224,6 +16855,136 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo return s } +type CreateSessionInput struct { + _ struct{} `locationName:"CreateSessionRequest" type:"structure"` + + // The name of the bucket that you create a session for. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies the mode of the session that will be created, either ReadWrite + // or ReadOnly. By default, a ReadWrite session is created. A ReadWrite session + // is capable of executing all the Zonal endpoint APIs on a directory bucket. + // A ReadOnly session is constrained to execute the following Zonal endpoint + // APIs: GetObject, HeadObject, ListObjectsV2, GetObjectAttributes, ListParts, + // and ListMultipartUploads. + SessionMode *string `location:"header" locationName:"x-amz-create-session-mode" type:"string" enum:"SessionMode"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
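// A sketch of requesting a read-only session on a directory bucket, assuming
// the generated SessionMode constants and the CreateSession operation method
// (svc is an *s3.S3 client; the bucket name is a placeholder):
//
//	out, err := svc.CreateSession(&s3.CreateSessionInput{
//		Bucket:      aws.String("DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"),
//		SessionMode: aws.String(s3.SessionModeReadOnly),
//	})
//	// out.Credentials carries the temporary access key, secret key, and
//	// session token scoped to Zonal endpoint calls on that bucket.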
+func (s CreateSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSessionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CreateSessionInput) SetBucket(v string) *CreateSessionInput { + s.Bucket = &v + return s +} + +func (s *CreateSessionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetSessionMode sets the SessionMode field's value. +func (s *CreateSessionInput) SetSessionMode(v string) *CreateSessionInput { + s.SessionMode = &v + return s +} + +func (s *CreateSessionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateSessionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CreateSessionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CreateSessionOutput struct { + _ struct{} `type:"structure"` + + // The established temporary security credentials for the created session. + // + // Credentials is a required field + Credentials *SessionCredentials `locationName:"Credentials" type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *CreateSessionOutput) SetCredentials(v *SessionCredentials) *CreateSessionOutput { + s.Credentials = v + return s +} + // The container element for specifying the default Object Lock retention settings // for new objects placed in the specified bucket. // @@ -15289,6 +17050,11 @@ type Delete struct { // The object to delete. // + // Directory buckets - For directory buckets, an object that's composed entirely + // of whitespace characters is not supported by the DeleteObjects API operation. + // The request will receive a 400 Bad Request error and none of the objects + // in the request will be deleted.
+ // // Objects is a required field Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` @@ -15358,9 +17124,9 @@ type DeleteBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID that identifies the analytics configuration. @@ -15488,9 +17254,9 @@ type DeleteBucketCorsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -15605,9 +17371,9 @@ type DeleteBucketEncryptionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -15718,12 +17484,25 @@ type DeleteBucketInput struct { // Specifies the bucket being deleted. // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
+ // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -15935,9 +17714,9 @@ type DeleteBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the inventory configuration. @@ -16065,9 +17844,9 @@ type DeleteBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -16181,9 +17960,9 @@ type DeleteBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the metrics configuration. The ID has a 64 character @@ -16334,9 +18113,9 @@ type DeleteBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -16447,12 +18226,25 @@ type DeleteBucketPolicyInput struct { // The bucket name. // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. 
Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -16566,9 +18358,9 @@ type DeleteBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -16682,9 +18474,9 @@ type DeleteBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -16798,9 +18590,9 @@ type DeleteBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -16917,7 +18709,7 @@ type DeleteMarkerEntry struct { // The object key. Key *string `min:"1" type:"string"` - // Date and time the object was last modified. + // Date and time when the object was last modified. 
LastModified *time.Time `type:"timestamp"` // The account that created the delete marker. @@ -17026,19 +18818,33 @@ type DeleteObjectInput struct { // The bucket name of the bucket containing the object. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -17047,11 +18853,13 @@ type DeleteObjectInput struct { // Indicates whether S3 Object Lock should bypass Governance-mode restrictions // to process this operation. To use this header, you must have the s3:BypassGovernanceRetention // permission. + // + // This functionality is not supported for directory buckets.
BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Key name of the object to delete. @@ -17063,16 +18871,25 @@ type DeleteObjectInput struct { // and the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA // delete enabled. + // + // This functionality is not supported for directory buckets. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -17195,16 +19012,24 @@ func (s DeleteObjectInput) updateArnableField(v string) (interface{}, error) { type DeleteObjectOutput struct { _ struct{} `type:"structure"` - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. + // Indicates whether the specified object version that was permanently deleted + // was (true) or was not (false) a delete marker before deletion. In a simple + // DELETE, this header indicates whether (true) or not (false) the current version + // of the object is a delete marker. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // Returns the version ID of the delete marker created as a result of the DELETE // operation. + // + // This functionality is not supported for directory buckets. 
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -17249,27 +19074,30 @@ type DeleteObjectTaggingInput struct { // The bucket name containing the objects from which to remove the tags. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key that identifies the object in the bucket from which to remove all @@ -17416,19 +19244,33 @@ type DeleteObjectsInput struct { // The bucket name containing the objects to delete. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. 
+ // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -17437,22 +19279,38 @@ type DeleteObjectsInput struct { // Specifies whether you want to delete this object even if it has a Governance-type // Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention // permission. + // + // This functionality is not supported for directory buckets. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. 
This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon + // S3 fails the request with the HTTP status code 400 Bad Request. + // + // For the x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // + // * CRC32 + // + // * CRC32C + // + // * SHA1 + // + // * SHA256 + // + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, + // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum + // algorithm that matches the provided value in x-amz-checksum-algorithm . + // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm // parameter. // - // This checksum algorithm must be the same for all parts and it match the checksum - // value supplied in the CreateMultipartUpload request. - // // The AWS SDK for Go v1 does not support automatic computing request payload // checksum. This feature is available in the AWS SDK for Go v2. If a value // is specified for this parameter, the matching algorithm's checksum member @@ -17468,22 +19326,37 @@ type DeleteObjectsInput struct { // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The concatenation of the authentication device's serial number, a space, // and the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA // delete enabled. + // + // When performing the DeleteObjects operation on an MFA delete enabled bucket, + // which attempts to delete the specified versioned objects, you must include + // an MFA token. If you don't provide an MFA token, the entire request will + // fail, even if there are non-versioned objects that you are trying to delete. + // If you provide an invalid token, whether there are versioned object keys + // in the request or not, the entire Multi-Object Delete request will fail. + // For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. 
For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -17618,6 +19491,8 @@ type DeleteObjectsOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -17665,9 +19540,9 @@ type DeletePublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -17777,20 +19652,27 @@ func (s DeletePublicAccessBlockOutput) GoString() string { type DeletedObject struct { _ struct{} `type:"structure"` - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. In a simple DELETE, this header indicates - // whether (true) or not (false) a delete marker was created. + // Indicates whether the specified object version that was permanently deleted + // was (true) or was not (false) a delete marker before deletion. In a simple + // DELETE, this header indicates whether (true) or not (false) the current version + // of the object is a delete marker. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool `type:"boolean"` // The version ID of the delete marker created as a result of the DELETE operation. // If you delete a specific object version, the value returned by this header // is the version ID of the object version deleted. + // + // This functionality is not supported for directory buckets. DeleteMarkerVersionId *string `type:"string"` // The name of the deleted object. Key *string `min:"1" type:"string"` // The version ID of the deleted object. + // + // This functionality is not supported for directory buckets. VersionId *string `type:"string"` } @@ -18506,6 +20388,8 @@ type Error struct { Message *string `type:"string"` // The version ID of the error. + // + // This functionality is not supported for directory buckets. VersionId *string `type:"string"` } @@ -18731,16 +20615,20 @@ type GetBucketAccelerateConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -18835,6 +20723,8 @@ type GetBucketAccelerateConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // The accelerate configuration of the bucket. @@ -18876,10 +20766,10 @@ type GetBucketAclInput struct { // Specifies the S3 bucket whose ACL is being requested. // - // To use this API operation against an access point, provide the alias of the - // access point in place of the bucket name. + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. // - // To use this API operation against an Object Lambda access point, provide + // When you use this API operation with an Object Lambda access point, provide // the alias of the Object Lambda access point in place of the bucket name. // If the Object Lambda access point alias in a request is not valid, the error // code InvalidAccessPointAliasError is returned. For more information about @@ -18888,9 +20778,9 @@ type GetBucketAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -19022,9 +20912,9 @@ type GetBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID that identifies the analytics configuration. @@ -19158,10 +21048,10 @@ type GetBucketCorsInput struct { // The bucket name for which to get the cors configuration. // - // To use this API operation against an access point, provide the alias of the - // access point in place of the bucket name. + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. // - // To use this API operation against an Object Lambda access point, provide + // When you use this API operation with an Object Lambda access point, provide // the alias of the Object Lambda access point in place of the bucket name. // If the Object Lambda access point alias in a request is not valid, the error // code InvalidAccessPointAliasError is returned. For more information about @@ -19170,9 +21060,9 @@ type GetBucketCorsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -19297,9 +21187,9 @@ type GetBucketEncryptionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -19551,9 +21441,9 @@ type GetBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the inventory configuration. 
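The hunks above repeat one doc-comment change across every GetBucket* input: the x-amz-expected-bucket-owner header now reads "if the account ID that you provide does not match the actual owner". A minimal sketch of what that contract means for a caller of this SDK follows; the bucket name and account ID are placeholders, not values taken from this diff, and a mismatch surfaces as a plain 403 on any of these operations:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := s3.New(sess)

	// ExpectedBucketOwner is marshaled into the x-amz-expected-bucket-owner header.
	// If the ID doesn't match the bucket's actual owner, S3 answers 403 Forbidden.
	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket:              aws.String("DOC-EXAMPLE-BUCKET"), // placeholder
		ExpectedBucketOwner: aws.String("111122223333"),       // placeholder account ID
	})
	if err != nil {
		if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 {
			log.Fatalf("bucket is not owned by the expected account: %v", reqErr)
		}
		log.Fatal(err)
	}
	fmt.Println(out)
}

The same field and failure mode apply unchanged to the other GetBucket* and DeleteBucket* inputs in this diff, so one guard like the type assertion above covers all of them.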
@@ -19690,9 +21580,9 @@ type GetBucketLifecycleConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -19815,9 +21705,9 @@ type GetBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -19937,10 +21827,10 @@ type GetBucketLocationInput struct { // The name of the bucket for which to get the location. // - // To use this API operation against an access point, provide the alias of the - // access point in place of the bucket name. + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. // - // To use this API operation against an Object Lambda access point, provide + // When you use this API operation with an Object Lambda access point, provide // the alias of the Object Lambda access point in place of the bucket name. // If the Object Lambda access point alias in a request is not valid, the error // code InvalidAccessPointAliasError is returned. For more information about @@ -19949,9 +21839,9 @@ type GetBucketLocationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20076,9 +21966,9 @@ type GetBucketLoggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20204,9 +22094,9 @@ type GetBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the metrics configuration. The ID has a 64 character @@ -20341,10 +22231,10 @@ type GetBucketNotificationConfigurationRequest struct { // The name of the bucket for which to get the notification configuration. // - // To use this API operation against an access point, provide the alias of the - // access point in place of the bucket name. + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. // - // To use this API operation against an Object Lambda access point, provide + // When you use this API operation with an Object Lambda access point, provide // the alias of the Object Lambda access point in place of the bucket name. // If the Object Lambda access point alias in a request is not valid, the error // code InvalidAccessPointAliasError is returned. For more information about @@ -20353,9 +22243,9 @@ type GetBucketNotificationConfigurationRequest struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20447,9 +22337,9 @@ type GetBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20568,23 +22458,40 @@ func (s *GetBucketOwnershipControlsOutput) SetOwnershipControls(v *OwnershipCont type GetBucketPolicyInput struct { _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` - // The bucket name for which to get the bucket policy. + // The bucket name to get the bucket policy for. // - // To use this API operation against an access point, provide the alias of the - // access point in place of the bucket name. 
+ // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name. + // Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // - // To use this API operation against an Object Lambda access point, provide - // the alias of the Object Lambda access point in place of the bucket name. - // If the Object Lambda access point alias in a request is not valid, the error - // code InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // Access points - When you use this API operation with an access point, provide + // the alias of the access point in place of the bucket name. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError, see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20707,9 +22614,9 @@ type GetBucketPolicyStatusInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20832,9 +22739,9 @@ type GetBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20958,9 +22865,9 @@ type GetBucketRequestPaymentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -21083,9 +22990,9 @@ type GetBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -21210,9 +23117,9 @@ type GetBucketVersioningInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -21346,9 +23253,9 @@ type GetBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -21496,8 +23403,10 @@ type GetObjectAclInput struct { // The bucket name that contains the object for which to get the ACL information. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) @@ -21506,9 +23415,9 @@ type GetObjectAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key of the object for which to get the ACL information. @@ -21517,13 +23426,19 @@ type GetObjectAclInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -21642,6 +23557,8 @@ type GetObjectAclOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. 
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -21686,27 +23603,41 @@ type GetObjectAttributesInput struct { // The name of the bucket that contains the object. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The object key. @@ -21728,13 +23659,19 @@ type GetObjectAttributesInput struct { PartNumberMarker *int64 `location:"header" locationName:"x-amz-part-number-marker" type:"integer"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -21743,6 +23680,8 @@ type GetObjectAttributesInput struct { // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by GetObjectAttributesInput's // String and GoString methods. @@ -21751,9 +23690,16 @@ type GetObjectAttributesInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // The version ID used to reference a specific version of the object. + // + // S3 Versioning isn't enabled and supported for directory buckets. For this + // API operation, only the null value of the version ID is supported by directory + // buckets. You can only specify null to the versionId query parameter in the + // request. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -21915,6 +23861,8 @@ type GetObjectAttributesOutput struct { // Specifies whether the object retrieved was (true) or was not (false) a delete // marker. If false, this response header does not appear in the response. + // + // This functionality is not supported for directory buckets. 
DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An ETag is an opaque identifier assigned by a web server to a specific version @@ -21932,15 +23880,22 @@ type GetObjectAttributesOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // Provides the storage class information of the object. Amazon S3 returns this // header for all objects except for S3 Standard storage class objects. // // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. StorageClass *string `type:"string" enum:"StorageClass"` // The version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -22038,6 +23993,15 @@ type GetObjectAttributesParts struct { // A container for elements related to a particular part. A response can contain // zero or more Parts elements. + // + // * General purpose buckets - For GetObjectAttributes, if an additional checksum + // (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, + // or x-amz-checksum-sha256) isn't applied to the object specified in the + // request, the response doesn't return Part. + // + // * Directory buckets - For GetObjectAttributes, no matter whether an additional + // checksum is applied to the object specified in the request, the response + // returns Part. Parts []*ObjectPart `locationName:"Part" type:"list" flattened:"true"` // The total number of parts. @@ -22103,21 +24067,37 @@ type GetObjectInput struct { // The bucket name containing the object. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name.
For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // Object Lambda access points - When you use this action with an Object Lambda + // access point, you must direct requests to the Object Lambda access point + // hostname. The Object Lambda access point hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -22129,25 +24109,50 @@ type GetObjectInput struct { // validation. This feature is available in the AWS SDK for Go v2. ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // Return the object only if its entity tag (ETag) is the same as the one specified; - // otherwise, return a 412 (precondition failed) error. + // Return the object only if its entity tag (ETag) is the same as the one specified + // in this header; otherwise, return a 412 Precondition Failed error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: If-Match condition evaluates to true, and If-Unmodified-Since + // condition evaluates to false, then S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfMatch *string `location:"header" locationName:"If-Match" type:"string"` // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error.
+ // otherwise, return a 304 Not Modified error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in + // the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since + // condition evaluates to true; then, S3 returns the 304 Not Modified status code. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. + // specified in this header; otherwise, return a 304 Not Modified error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in + // the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since + // condition evaluates to true; then, S3 returns the 304 Not Modified HTTP status + // code. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. + // otherwise, return a 412 Precondition Failed error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since + // condition evaluates to false; then, S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` // Key of the object to get. @@ -22160,7 +24165,7 @@ type GetObjectInput struct { // Useful for downloading just a part of an object. PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` - // Downloads the specified range bytes of an object. For more information about + // Downloads the specified byte range of an object. For more information about // the HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range // (https://www.rfc-editor.org/rfc/rfc9110.html#name-range). // @@ -22168,16 +24173,20 @@ type GetObjectInput struct { Range *string `location:"header" locationName:"Range" type:"string"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Sets the Cache-Control header of the response.
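// A minimal sketch of a conditional GET using the If-None-Match header described
// above, assuming an existing session and hypothetical bucket, key, and cachedETag
// values; a 304 response surfaces as a request failure whose status code can be
// inspected:
//
//	svc := s3.New(session.Must(session.NewSession()))
//	out, err := svc.GetObject(&s3.GetObjectInput{
//	    Bucket:      aws.String("amzn-s3-demo-bucket"), // hypothetical
//	    Key:         aws.String("my-key"),              // hypothetical
//	    IfNoneMatch: aws.String(cachedETag),
//	})
//	if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 304 {
//	    // The cached copy is still current; skip the download.
//	} else if err == nil {
//	    defer out.Body.Close() // the object changed; read the new body
//	}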
ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` - // Sets the Content-Disposition header of the response + // Sets the Content-Disposition header of the response. ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` // Sets the Content-Encoding header of the response. @@ -22192,27 +24201,92 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"` - // Specifies the algorithm to use to when decrypting the object (for example, - // AES256). + // Specifies the algorithm to use when decrypting the object (for example, AES256). + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when + // you GET the object, you must use the following headers: + // + // * x-amz-server-side-encryption-customer-algorithm + // + // * x-amz-server-side-encryption-customer-key + // + // * x-amz-server-side-encryption-customer-key-MD5 + // + // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - // Specifies the customer-provided encryption key for Amazon S3 used to encrypt - // the data. This value is used to decrypt the object when recovering it and - // must match the one used when storing the data. The key must be appropriate - // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm - // header. + // Specifies the customer-provided encryption key that you originally provided + // for Amazon S3 to encrypt the data before storing it. This value is used to + // decrypt the object when recovering it and must match the one used when storing + // the data. The key must be appropriate for use with the algorithm specified + // in the x-amz-server-side-encryption-customer-algorithm header. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when + // you GET the object, you must use the following headers: + // + // * x-amz-server-side-encryption-customer-algorithm + // + // * x-amz-server-side-encryption-customer-key + // + // * x-amz-server-side-encryption-customer-key-MD5 + // + // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by GetObjectInput's // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
- // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. Amazon S3 uses this header for a message integrity + // check to ensure that the encryption key was transmitted without error. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when + // you GET the object, you must use the following headers: + // + // * x-amz-server-side-encryption-customer-algorithm + // + // * x-amz-server-side-encryption-customer-key + // + // * x-amz-server-side-encryption-customer-key-MD5 + // + // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. + // + // By default, the GetObject operation returns the current version of an object. + // To return a different version, use the versionId subresource. + // + // * If you include a versionId in your request header, you must have the + // s3:GetObjectVersion permission to access a specific version of an object. + // The s3:GetObject permission is not required in this scenario. + // + // * If you request the current version of an object without a specific versionId + // in the request header, only the s3:GetObject permission is required. The + // s3:GetObjectVersion permission is not required in this scenario. + // + // * Directory buckets - S3 Versioning isn't enabled or supported for directory + // buckets. For this API operation, only the null value of the version ID + // is supported by directory buckets. You can only specify null for the versionId + // query parameter in the request. + // + // For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -22429,8 +24503,10 @@ type GetObjectLegalHoldInput struct { // The bucket name containing the object whose legal hold status you want to // retrieve. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name.
For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) @@ -22439,9 +24515,9 @@ type GetObjectLegalHoldInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key name for the object whose legal hold status you want to retrieve. @@ -22450,10 +24526,14 @@ type GetObjectLegalHoldInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID of the object whose legal hold status you want to retrieve. @@ -22600,8 +24680,10 @@ type GetObjectLockConfigurationInput struct { // The bucket whose Object Lock configuration you want to retrieve. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) @@ -22610,9 +24692,9 @@ type GetObjectLockConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -22730,7 +24812,7 @@ func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectL type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` - // Indicates that a range of bytes was specified. + // Indicates that a range of bytes was specified in the request. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // Object data. @@ -22738,47 +24820,41 @@ type GetObjectOutput struct { // Indicates whether the object uses an S3 Bucket Key for server-side encryption // with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. 
This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` // Specifies presentational information for the object. ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - // Specifies what content encodings have been applied to the object and thus + // Indicates what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced // by the Content-Type header field. ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` @@ -22795,23 +24871,40 @@ type GetObjectOutput struct { // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Indicates whether the object retrieved was (true) or was not (false) a Delete // Marker. If false, this response header does not appear in the response. + // + // * If the current version of the object is a delete marker, Amazon S3 behaves + // as if the object was deleted and includes x-amz-delete-marker: true in + // the response. + // + // * If the specified version in the request is a delete marker, the response + // returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An entity tag (ETag) is an opaque identifier assigned by a web server to // a specific version of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is URL-encoded. + // If the object expiration is configured (see PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs providing object expiration information. The value of the + // rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // The date and time at which the object is no longer cacheable. Expires *string `location:"header" locationName:"Expires" type:"string"` - // Creation date of the object. + // Date and time when the object was last modified. 
+ // + // General purpose buckets - When you specify a versionId of the object in your + // request, if the specified version in the request is a delete marker, the + // response returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. @@ -22821,20 +24914,29 @@ type GetObjectOutput struct { // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, - // you can create metadata whose values are not legal HTTP headers. + // This is set to the number of metadata entries not returned in the headers + // that are prefixed with x-amz-meta-. This can happen if you create metadata + // using an API like SOAP that supports more flexible metadata than the REST + // API. For example, using SOAP, you can create metadata whose values are not + // legal HTTP headers. + // + // This functionality is not supported for directory buckets. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` // Indicates whether this object has an active legal hold. This field is only // returned if you have permission to view an object's legal hold status. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The Object Lock mode currently in place for this object. + // The Object Lock mode that's currently in place for this object. + // + // This functionality is not supported for directory buckets. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // The date and time when this object's Object Lock will expire. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. This value is only returned if you specify @@ -22843,51 +24945,80 @@ type GetObjectOutput struct { // Amazon S3 can return this if your request involves a bucket that is either // a source or destination in a replication rule. + // + // This functionality is not supported for directory buckets. ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // Provides information about object restoration action and expiration time // of the restored object copy. + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. 
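// A minimal sketch of checking whether a restored copy of an archived object is
// ready to read, assuming svc is an *s3.S3 client and the bucket and key are
// hypothetical; the Restore header carries ongoing-request and expiry-date
// key-value pairs as plain text:
//
//	out, err := svc.GetObject(&s3.GetObjectInput{
//	    Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
//	    Key:    aws.String("archived-object"),     // hypothetical
//	})
//	if err == nil && out.Restore != nil &&
//	    strings.Contains(*out.Restore, `ongoing-request="false"`) {
//	    // The temporary restored copy is available and can be read from out.Body.
//	}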
Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Key Management Service (KMS) symmetric + // If present, indicates the ID of the Key Management Service (KMS) symmetric // encryption customer managed key that was used for the object. // + // This functionality is not supported for directory buckets. + // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by GetObjectOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon + // The server-side encryption algorithm used when you store this object in Amazon // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Provides storage class information of the object. Amazon S3 returns this // header for all objects except for S3 Standard storage class objects. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // The number of tags, if any, on the object. + // The number of tags, if any, on the object, when you have the relevant permission + // to read object tags. + // + // You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) + // to retrieve the tag set associated with an object. + // + // This functionality is not supported for directory buckets. TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` - // Version of the object. + // Version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. 
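// A minimal sketch of reading a few of these response fields after a successful
// GetObject call, assuming out is a *s3.GetObjectOutput; aws.StringValue and
// aws.Int64Value safely dereference pointer fields that may be nil when the
// corresponding header was not returned:
//
//	defer out.Body.Close()
//	fmt.Println("storage class:", aws.StringValue(out.StorageClass))
//	fmt.Println("version ID:   ", aws.StringValue(out.VersionId))
//	fmt.Println("tag count:    ", aws.Int64Value(out.TagCount))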
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -23131,8 +25262,10 @@ type GetObjectRetentionInput struct { // The bucket name containing the object whose retention settings you want to // retrieve. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) @@ -23141,9 +25274,9 @@ type GetObjectRetentionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key name for the object whose retention settings you want to retrieve. @@ -23152,10 +25285,14 @@ type GetObjectRetentionInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID for the object whose retention settings you want to retrieve. @@ -23302,27 +25439,30 @@ type GetObjectTaggingInput struct { // The bucket name containing the object for which to get the tagging information. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which to get the tagging information. @@ -23331,10 +25471,14 @@ type GetObjectTaggingInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. 
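// A minimal sketch of acknowledging Requester Pays on this call, assuming svc is
// an *s3.S3 client and the bucket and key are hypothetical; the requester's
// account is then billed for the request:
//
//	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
//	    Bucket:       aws.String("amzn-s3-demo-bucket"), // hypothetical
//	    Key:          aws.String("my-key"),              // hypothetical
//	    RequestPayer: aws.String(s3.RequestPayerRequester),
//	})
//	if err == nil {
//	    for _, tag := range out.TagSet {
//	        fmt.Println(aws.StringValue(tag.Key), "=", aws.StringValue(tag.Value))
//	    }
//	}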
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The versionId of the object for which to get the tagging information. @@ -23496,9 +25640,9 @@ type GetObjectTorrentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The object key for which to get the information. @@ -23507,10 +25651,14 @@ type GetObjectTorrentInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -23620,6 +25768,8 @@ type GetObjectTorrentOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -23662,9 +25812,9 @@ type GetPublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -23994,33 +26144,48 @@ type HeadBucketInput struct { // The bucket name. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. 
Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with an Object Lambda access point, provide the - // alias of the Object Lambda access point in place of the bucket name. If the - // Object Lambda access point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, - // see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). - // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError, see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -24106,6 +26271,30 @@ func (s HeadBucketInput) updateArnableField(v string) (interface{}, error) { type HeadBucketOutput struct { _ struct{} `type:"structure"` + + // Indicates whether the bucket name used in the request is an access point + // alias. + // + // This functionality is not supported for directory buckets. + AccessPointAlias *bool `location:"header" locationName:"x-amz-access-point-alias" type:"boolean"` + + // The name of the location where the bucket is created. + // + // For directory buckets, the AZ ID of the Availability Zone where the bucket + // is created. An example AZ ID value is usw2-az2. + // + // This functionality is only supported by directory buckets. + BucketLocationName *string `location:"header" locationName:"x-amz-bucket-location-name" type:"string"` + + // The type of location where the bucket is created. + // + // This functionality is only supported by directory buckets. + BucketLocationType *string `location:"header" locationName:"x-amz-bucket-location-type" type:"string" enum:"LocationType"` + + // The Region where the bucket is located. + // + // This functionality is not supported for directory buckets. + BucketRegion *string `location:"header" locationName:"x-amz-bucket-region" type:"string"` } // String returns the string representation. @@ -24126,24 +26315,62 @@ func (s HeadBucketOutput) GoString() string { return s.String() } +// SetAccessPointAlias sets the AccessPointAlias field's value. +func (s *HeadBucketOutput) SetAccessPointAlias(v bool) *HeadBucketOutput { + s.AccessPointAlias = &v + return s +} + +// SetBucketLocationName sets the BucketLocationName field's value. +func (s *HeadBucketOutput) SetBucketLocationName(v string) *HeadBucketOutput { + s.BucketLocationName = &v + return s +} + +// SetBucketLocationType sets the BucketLocationType field's value. +func (s *HeadBucketOutput) SetBucketLocationType(v string) *HeadBucketOutput { + s.BucketLocationType = &v + return s +} + +// SetBucketRegion sets the BucketRegion field's value. +func (s *HeadBucketOutput) SetBucketRegion(v string) *HeadBucketOutput { + s.BucketRegion = &v + return s +} + type HeadObjectInput struct { _ struct{} `locationName:"HeadObjectRequest" type:"structure"` - // The name of the bucket containing the object. + // The name of the bucket that contains the object. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. // - // When using this action with an access point, you must direct requests to - // the access point hostname.
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -24156,25 +26383,69 @@ type HeadObjectInput struct { // must have permission to use the kms:Decrypt action for the request to succeed. ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Return the object only if its entity tag (ETag) is the same as the one specified; // otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // + // * If-Match condition evaluates to true, and; + // + // * If-Unmodified-Since condition evaluates to false; + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). 
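// A minimal sketch of using If-Match to verify that an object still has a known
// ETag before proceeding, assuming svc is an *s3.S3 client and knownETag is
// hypothetical; a mismatch surfaces as a request failure with HTTP status 412:
//
//	_, err := svc.HeadObject(&s3.HeadObjectInput{
//	    Bucket:  aws.String("amzn-s3-demo-bucket"), // hypothetical
//	    Key:     aws.String("my-key"),              // hypothetical
//	    IfMatch: aws.String(knownETag),
//	})
//	if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 412 {
//	    // The object changed since the ETag was recorded.
//	}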
IfMatch *string `location:"header" locationName:"If-Match" type:"string"` // Return the object only if it has been modified since the specified time; // otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in + // the request as follows: + // + // * If-None-Match condition evaluates to false, and; + // + // * If-Modified-Since condition evaluates to true; + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` // Return the object only if its entity tag (ETag) is different from the one // specified; otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in + // the request as follows: + // + // * If-None-Match condition evaluates to false, and; + // + // * If-Modified-Since condition evaluates to true; + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` // Return the object only if it has not been modified since the specified time; // otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // + // * If-Match condition evaluates to true, and; + // + // * If-Unmodified-Since condition evaluates to false; + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` // The object key. @@ -24194,14 +26465,19 @@ type HeadObjectInput struct { Range *string `location:"header" locationName:"Range" type:"string"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. 
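// A minimal sketch of passing the SSE-C headers on a HEAD request, assuming key
// holds the same raw 256-bit key that was provided when the object was stored;
// the MD5 digest is computed over the raw key bytes and base64-encoded:
//
//	sum := md5.Sum(key) // key is a []byte of length 32
//	_, err := svc.HeadObject(&s3.HeadObjectInput{
//	    Bucket:               aws.String("amzn-s3-demo-bucket"), // hypothetical
//	    Key:                  aws.String("my-key"),              // hypothetical
//	    SSECustomerAlgorithm: aws.String("AES256"),
//	    SSECustomerKey:       aws.String(string(key)),
//	    SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
//	})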
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -24210,6 +26486,8 @@ type HeadObjectInput struct { // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by HeadObjectInput's // String and GoString methods. @@ -24218,9 +26496,14 @@ type HeadObjectInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -24402,51 +26685,63 @@ type HeadObjectOutput struct { AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // The archive state of the head object. + // + // This functionality is not supported for directory buckets. ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` // Indicates whether the object uses an S3 Bucket Key for server-side encryption // with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` // Specifies presentational information for the object. ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - // Specifies what content encodings have been applied to the object and thus + // Indicates what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced // by the Content-Type header field. 
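// A minimal sketch of requesting checksum values on a HEAD request via the
// x-amz-checksum-mode header, assuming the object was uploaded with an additional
// checksum; per the notes above, for multipart uploads the returned value may be
// a composite of the per-part checksums rather than a digest of the full object:
//
//	out, err := svc.HeadObject(&s3.HeadObjectInput{
//	    Bucket:       aws.String("amzn-s3-demo-bucket"), // hypothetical
//	    Key:          aws.String("my-key"),              // hypothetical
//	    ChecksumMode: aws.String(s3.ChecksumModeEnabled),
//	})
//	if err == nil {
//	    fmt.Println("sha256:", aws.StringValue(out.ChecksumSHA256))
//	}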
ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` @@ -24462,21 +26757,27 @@ type HeadObjectOutput struct { // Specifies whether the object retrieved was (true) or was not (false) a Delete // Marker. If false, this response header does not appear in the response. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An entity tag (ETag) is an opaque identifier assigned by a web server to // a specific version of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is URL-encoded. + // If the object expiration is configured (see PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs providing object expiration information. The value of the + // rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // The date and time at which the object is no longer cacheable. Expires *string `location:"header" locationName:"Expires" type:"string"` - // Creation date of the object. + // Date and time when the object was last modified. LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. @@ -24490,6 +26791,8 @@ type HeadObjectOutput struct { // headers. This can happen if you create metadata using an API like SOAP that // supports more flexible metadata than the REST API. For example, using SOAP, // you can create metadata whose values are not legal HTTP headers. + // + // This functionality is not supported for directory buckets. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` // Specifies whether a legal hold is in effect for this object. This header @@ -24497,15 +26800,21 @@ type HeadObjectOutput struct { // This header is not returned if the specified version of this object has never // had a legal hold applied. For more information about S3 Object Lock, see // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` // The Object Lock mode, if any, that's in effect for this object. This header // is only returned if the requester has the s3:GetObjectRetention permission. // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // + // This functionality is not supported for directory buckets. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // The date and time when the Object Lock retention period expires. This header // is only returned if the requester has the s3:GetObjectRetention permission. + // + // This functionality is not supported for directory buckets. 
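// Example (editor-added, not part of the upstream diff): a hedged sketch of
// inspecting the Object Lock headers described above. As the docs note, the
// x-amz-object-lock-mode and x-amz-object-lock-retain-until-date headers are
// only returned when the caller has s3:GetObjectRetention; names are placeholders.
package examples

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// printObjectLock prints the retention and legal-hold state of one object.
func printObjectLock(svc *s3.S3) error {
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("my-locked-bucket"), // hypothetical
		Key:    aws.String("report.csv"),       // hypothetical
	})
	if err != nil {
		return err
	}
	// Nil when no retention applies or the permission is missing (see above).
	if out.ObjectLockMode != nil {
		fmt.Printf("retention: %s until %s\n",
			aws.StringValue(out.ObjectLockMode),
			aws.TimeValue(out.ObjectLockRetainUntilDate).Format(time.RFC3339))
	}
	fmt.Println("legal hold:", aws.StringValue(out.ObjectLockLegalHoldStatus))
	return nil
}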
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. This value is only returned if you specify @@ -24544,10 +26853,14 @@ type HeadObjectOutput struct { // header will return FAILED. // // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). + // + // This functionality is not supported for directory buckets. ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If the object is an archived object (an object whose storage class is GLACIER), @@ -24565,42 +26878,61 @@ type HeadObjectOutput struct { // // For more information about archiving objects, see Transitioning Objects: // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Key Management Service (KMS) symmetric + // If present, indicates the ID of the Key Management Service (KMS) symmetric // encryption customer managed key that was used for the object. // + // This functionality is not supported for directory buckets. + // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by HeadObjectOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon + // The server-side encryption algorithm used when you store this object in Amazon // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. 
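// Example (editor-added, not part of the upstream diff): the Restore field
// above carries the raw x-amz-restore header, which (for archived objects) is
// a string such as ongoing-request="true" or ongoing-request="false",
// expiry-date="...". A minimal sketch of checking restore state, assuming that
// header format; substring matching is deliberate since the header is not
// structured data.
package examples

import (
	"strings"

	"github.com/aws/aws-sdk-go/service/s3"
)

// restoreInProgress reports whether an archived object's restore is still running.
func restoreInProgress(out *s3.HeadObjectOutput) bool {
	if out.Restore == nil {
		// Header absent: not an archived object, or no restore was requested.
		return false
	}
	return strings.Contains(*out.Restore, `ongoing-request="true"`)
}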
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Provides storage class information of the object. Amazon S3 returns this // header for all objects except for S3 Standard storage class objects. // // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // Version of the object. + // Version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -24886,10 +27218,16 @@ type Initiator struct { _ struct{} `type:"structure"` // Name of the Principal. + // + // This functionality is not supported for directory buckets. DisplayName *string `type:"string"` // If the principal is an Amazon Web Services account, it provides the Canonical // User ID. If the principal is an IAM User, it provides a user ARN value. + // + // Directory buckets - If the principal is an Amazon Web Services account, it + // provides the Amazon Web Services account ID. If the principal is an IAM User, + // it provides a user ARN value. ID *string `type:"string"` } @@ -26325,9 +28663,9 @@ type ListBucketAnalyticsConfigurationsInput struct { // should begin. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -26651,9 +28989,9 @@ type ListBucketInventoryConfigurationsInput struct { // that Amazon S3 understands. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -26820,9 +29158,9 @@ type ListBucketMetricsConfigurationsInput struct { // value that Amazon S3 understands. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -27038,24 +29376,123 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { return s } +type ListDirectoryBucketsInput struct { + _ struct{} `locationName:"ListDirectoryBucketsRequest" type:"structure"` + + // ContinuationToken indicates to Amazon S3 that the list is being continued + // on this bucket with a token. ContinuationToken is obfuscated and is not a + // real key. You can use this ContinuationToken for pagination of the list results. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // Maximum number of buckets to be returned in response. When the number is + // more than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + MaxDirectoryBuckets *int64 `location:"querystring" locationName:"max-directory-buckets" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsInput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListDirectoryBucketsInput) SetContinuationToken(v string) *ListDirectoryBucketsInput { + s.ContinuationToken = &v + return s +} + +// SetMaxDirectoryBuckets sets the MaxDirectoryBuckets field's value. +func (s *ListDirectoryBucketsInput) SetMaxDirectoryBuckets(v int64) *ListDirectoryBucketsInput { + s.MaxDirectoryBuckets = &v + return s +} + +type ListDirectoryBucketsOutput struct { + _ struct{} `type:"structure"` + + // The list of buckets owned by the requester. + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + ContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ListDirectoryBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListDirectoryBucketsOutput) SetBuckets(v []*Bucket) *ListDirectoryBucketsOutput { + s.Buckets = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListDirectoryBucketsOutput) SetContinuationToken(v string) *ListDirectoryBucketsOutput { + s.ContinuationToken = &v + return s +} + type ListMultipartUploadsInput struct { _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` // The name of the bucket to which the multipart upload was initiated. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. 
// // Bucket is a required field @@ -27069,6 +29506,8 @@ type ListMultipartUploadsInput struct { // parameter, then the substring starts at the beginning of the key. The keys // that are grouped under CommonPrefixes result element are not returned elsewhere // in the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies @@ -27079,20 +29518,28 @@ type ListMultipartUploadsInput struct { // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // Together with upload-id-marker, this parameter specifies the multipart upload - // after which listing should begin. - // - // If upload-id-marker is not specified, only the keys lexicographically greater - // than the specified key-marker will be included in the list. - // - // If upload-id-marker is specified, any multipart uploads for a key equal to - // the key-marker might also be included, provided those multipart uploads have - // upload IDs lexicographically greater than the specified upload-id-marker. + // Specifies the multipart upload after which listing should begin. + // + // * General purpose buckets - For general purpose buckets, key-marker is + // an object key. Together with upload-id-marker, this parameter specifies + // the multipart upload after which listing should begin. If upload-id-marker + // is not specified, only the keys lexicographically greater than the specified + // key-marker will be included in the list. If upload-id-marker is specified, + // any multipart uploads for a key equal to the key-marker might also be + // included, provided those multipart uploads have upload IDs lexicographically + // greater than the specified upload-id-marker. + // + // * Directory buckets - For directory buckets, key-marker is obfuscated + // and isn't a real object key. The upload-id-marker parameter isn't supported + // by directory buckets. To list the additional multipart uploads, you only + // need to set the value of key-marker to the NextKeyMarker value from the + // previous response. In the ListMultipartUploads response, the multipart + // uploads aren't sorted lexicographically based on the object keys. KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of multipart uploads, from 1 to 1,000, to return @@ -27104,13 +29551,20 @@ type ListMultipartUploadsInput struct { // prefix. You can use prefixes to separate a bucket into different grouping // of keys. (You can think of using prefix to make groups in the same way that // you'd use a folder in a file system.) + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. 
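// Example (editor-added, not part of the upstream diff): a pagination sketch
// over the new ListDirectoryBuckets API defined above. Per its doc comments the
// ContinuationToken is obfuscated and not a real key; this loop assumes the
// returned token is the next-page token and is omitted once the listing is
// complete.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	input := &s3.ListDirectoryBucketsInput{
		MaxDirectoryBuckets: aws.Int64(100),
	}
	for {
		out, err := svc.ListDirectoryBuckets(input)
		if err != nil {
			panic(err)
		}
		for _, b := range out.Buckets {
			fmt.Println(aws.StringValue(b.Name))
		}
		if out.ContinuationToken == nil {
			break // assumed: no token means the final page was returned
		}
		input.ContinuationToken = out.ContinuationToken
	}
}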
Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Together with key-marker, specifies the multipart upload after which listing @@ -27118,6 +29572,8 @@ type ListMultipartUploadsInput struct { // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker // might be included in the list only if they have an upload ID lexicographically // greater than the specified upload-id-marker. + // + // This functionality is not supported for directory buckets. UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` } @@ -27253,10 +29709,15 @@ type ListMultipartUploadsOutput struct { // If you specify a delimiter in the request, then the result returns each distinct // key prefix containing the delimiter in a CommonPrefixes element. The distinct // key prefixes are returned in the Prefix child element. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` // Contains the delimiter you specified in the request. If you don't specify // a delimiter in your request, this element is absent from the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. @@ -27287,17 +29748,26 @@ type ListMultipartUploadsOutput struct { // When a list is truncated, this element specifies the value that should be // used for the upload-id-marker request parameter in a subsequent request. + // + // This functionality is not supported for directory buckets. NextUploadIdMarker *string `type:"string"` // When a prefix is provided in the request, this field contains the specified // prefix. The result contains only keys starting with the specified prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. Prefix *string `type:"string"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // Upload ID after which listing began. + // + // This functionality is not supported for directory buckets. UploadIdMarker *string `type:"string"` // Container for elements related to a particular multipart upload. A response @@ -27431,9 +29901,9 @@ type ListObjectVersionsInput struct { // keys in the response. 
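// Example (editor-added, not part of the upstream diff): listing in-progress
// multipart uploads with the SDK's generated paginator, which resends
// NextKeyMarker/NextUploadIdMarker as the docs above describe. Note the
// directory-bucket caveat above: upload-id-marker is not supported there, and
// results are not sorted lexicographically. Bucket name is a placeholder.
package examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listInProgressUploads prints the key and upload ID of each pending upload.
func listInProgressUploads(svc *s3.S3) error {
	return svc.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
		Bucket: aws.String("my-bucket"), // hypothetical
	}, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
		for _, u := range page.Uploads {
			fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		return true // keep paging until IsTruncated is false
	})
}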
EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Specifies the key to start with when listing objects in a bucket. @@ -27459,10 +29929,14 @@ type ListObjectVersionsInput struct { Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Specifies the object version you want to start listing from. @@ -27656,6 +30130,8 @@ type ListObjectVersionsOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // Marks the last version of the key returned in a truncated response. @@ -27772,19 +30248,33 @@ type ListObjectsInput struct { // The name of the bucket containing the objects. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
// When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -27801,9 +30291,9 @@ type ListObjectsInput struct { // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts @@ -28014,6 +30504,8 @@ type ListObjectsOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -28104,21 +30596,33 @@ func (s *ListObjectsOutput) SetRequestCharged(v string) *ListObjectsOutput { type ListObjectsV2Input struct { _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` - // Bucket name to list. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). 
For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -28126,23 +30630,35 @@ type ListObjectsV2Input struct { // ContinuationToken indicates to Amazon S3 that the list is being continued // on this bucket with a token. ContinuationToken is obfuscated and is not a - // real key. + // real key. You can use this ContinuationToken for pagination of the list results. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` // A delimiter is a character that you use to group keys. + // + // * Directory buckets - For directory buckets, / is the only supported delimiter. + // + // * Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. + // For more information about multipart uploads, see Multipart Upload Overview + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in + // the Amazon S3 User Guide. 
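// Example (editor-added, not part of the upstream diff): using the Delimiter
// field described above to emulate one level of "folders". Keys that share a
// prefix up to the delimiter are rolled up into CommonPrefixes rather than
// Contents; for directory buckets only "/" is a valid delimiter. Names are
// placeholders.
package examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listOneLevel prints the immediate "subdirectories" and objects under a prefix.
func listOneLevel(svc *s3.S3) error {
	out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket:    aws.String("my-bucket"), // hypothetical
		Prefix:    aws.String("notes/"),    // hypothetical
		Delimiter: aws.String("/"),
	})
	if err != nil {
		return err
	}
	for _, cp := range out.CommonPrefixes {
		fmt.Println("dir:", aws.StringValue(cp.Prefix))
	}
	for _, obj := range out.Contents {
		fmt.Println("obj:", aws.StringValue(obj.Key))
	}
	return nil
}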
Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The owner field is not present in ListObjectsV2 by default. If you want to // return the owner field with each key in the result, then set the FetchOwner // field to true. + // + // Directory buckets - For directory buckets, the bucket owner is returned as + // the object owner for all objects. FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` // Sets the maximum number of keys returned in the response. By default, the @@ -28152,18 +30668,27 @@ type ListObjectsV2Input struct { // Specifies the optional fields that you want returned in the response. Fields // that you do not specify are not returned. + // + // This functionality is not supported for directory buckets. OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"` // Limits the response to keys that begin with the specified prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Confirms that the requester knows that she or he will be charged for the // list objects request in V2 style. Bucket owners need not specify this parameter // in their requests. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts // listing after this specified key. StartAfter can be any key in the bucket. + // + // This functionality is not supported for directory buckets. StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` } @@ -28304,8 +30829,9 @@ func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { type ListObjectsV2Output struct { _ struct{} `type:"structure"` - // All of the keys (up to 1,000) rolled up into a common prefix count as a single - // return when calculating the number of returns. + // All of the keys (up to 1,000) that share the same prefix are grouped together. + // When counting the total numbers of returns by this API operation, this group + // of keys is considered as one item. // // A response can contain CommonPrefixes only if you specify a delimiter. // @@ -28319,12 +30845,24 @@ type ListObjectsV2Output struct { // in notes/summer/july, the common prefix is notes/summer/. All of the keys // that roll up into a common prefix count as a single return when calculating // the number of returns. + // + // * Directory buckets - For directory buckets, only prefixes that end in + // a delimiter (/) are supported. 
+ // + // * Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. + // For more information about multipart uploads, see Multipart Upload Overview + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in + // the Amazon S3 User Guide. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` // Metadata about each object returned. Contents []*Object `type:"list" flattened:"true"` // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + // You can use this ContinuationToken for pagination of the list results. ContinuationToken *string `type:"string"` // Causes keys that contain the same string between the prefix and the first @@ -28332,6 +30870,8 @@ type ListObjectsV2Output struct { // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere // in the response. Each rolled-up result counts as only one return against // the MaxKeys value. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object key names in the XML response. @@ -28359,21 +30899,6 @@ type ListObjectsV2Output struct { MaxKeys *int64 `type:"integer"` // The bucket name. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. Name *string `type:"string"` // NextContinuationToken is sent when isTruncated is true, which means there @@ -28383,13 +30908,20 @@ type ListObjectsV2Output struct { NextContinuationToken *string `type:"string"` // Keys that begin with the indicated prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. Prefix *string `type:"string"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If StartAfter was sent with the request, it is included in the response. + // + // This functionality is not supported for directory buckets. 
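// Example (editor-added, not part of the upstream diff): a pagination sketch
// over ListObjectsV2. The generated ListObjectsV2Pages helper resends
// NextContinuationToken until IsTruncated is false; returning false from the
// callback stops early. Bucket name is a placeholder.
package examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listAllObjects walks every key in the bucket, one page at a time.
func listAllObjects(svc *s3.S3) error {
	return svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket:  aws.String("my-bucket"), // hypothetical
		MaxKeys: aws.Int64(1000),
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
		}
		return true
	})
}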
StartAfter *string `type:"string"` } @@ -28494,27 +31026,41 @@ type ListPartsInput struct { // The name of the bucket to which the parts are being uploaded. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
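// Example (editor-added, not part of the upstream diff): the reworded
// ExpectedBucketOwner text above applies across these inputs — a mismatched
// account ID surfaces as 403 Forbidden. A hedged sketch of detecting that via
// awserr.RequestFailure; the names and account ID are placeholders.
package examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// checkOwner fails fast when the bucket is not owned by the expected account.
func checkOwner(svc *s3.S3) error {
	_, err := svc.ListParts(&s3.ListPartsInput{
		Bucket:              aws.String("my-bucket"),      // hypothetical
		Key:                 aws.String("big-object"),     // hypothetical
		UploadId:            aws.String("example-upload"), // hypothetical
		ExpectedBucketOwner: aws.String("111122223333"),   // hypothetical account ID
	})
	if rf, ok := err.(awserr.RequestFailure); ok && rf.StatusCode() == 403 {
		fmt.Println("bucket is not owned by the expected account")
	}
	return err
}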
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which the multipart upload was initiated. @@ -28530,16 +31076,22 @@ type ListPartsInput struct { PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is needed only when the object was created using a checksum algorithm. // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // The server-side encryption (SSE) customer managed key. This parameter is @@ -28547,6 +31099,8 @@ type ListPartsInput struct { // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by ListPartsInput's // String and GoString methods. @@ -28556,6 +31110,8 @@ type ListPartsInput struct { // is needed only when the object was created using a checksum algorithm. For // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -28720,11 +31276,15 @@ type ListPartsOutput struct { // // The response will also include the x-amz-abort-rule-id header that will provide // the ID of the lifecycle configuration rule that defines this action. + // + // This functionality is not supported for directory buckets. AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` // This header is returned along with the x-amz-abort-date header. It identifies // applicable lifecycle configuration rule that defines the action to abort // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. 
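// Example (editor-added, not part of the upstream diff): enumerating uploaded
// parts with the generated ListPartsPages paginator, which follows
// NextPartNumberMarker as described above. Bucket, key, and upload ID are
// placeholders; Size is the raw part size in bytes (now a long in the model).
package examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listUploadedParts prints each part's number, size, and ETag.
func listUploadedParts(svc *s3.S3) error {
	return svc.ListPartsPages(&s3.ListPartsInput{
		Bucket:   aws.String("my-bucket"),      // hypothetical
		Key:      aws.String("big-object"),     // hypothetical
		UploadId: aws.String("example-upload"), // hypothetical
	}, func(page *s3.ListPartsOutput, lastPage bool) bool {
		for _, p := range page.Parts {
			fmt.Printf("part %d: %d bytes, etag %s\n",
				aws.Int64Value(p.PartNumber), aws.Int64Value(p.Size),
				aws.StringValue(p.ETag))
		}
		return true
	})
}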
AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // The name of the bucket to which the multipart upload was initiated. Does @@ -28759,6 +31319,9 @@ type ListPartsOutput struct { // Container element that identifies the object owner, after the object is created. // If multipart upload is initiated by an IAM user, this element provides the // parent account ID and display name. + // + // Directory buckets - The bucket owner is returned as the object owner for + // all the parts. Owner *Owner `type:"structure"` // When a list is truncated, this element specifies the last part in the list, @@ -28772,10 +31335,14 @@ type ListPartsOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded - // object. + // The class of storage used to store the uploaded object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. StorageClass *string `type:"string" enum:"StorageClass"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -29033,6 +31600,56 @@ func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { return s } +// Specifies the location where the bucket will be created. +// +// For directory buckets, the location type is Availability Zone. For more information +// about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. +// +// This functionality is only supported by directory buckets. +type LocationInfo struct { + _ struct{} `type:"structure"` + + // The name of the location where the bucket will be created. + // + // For directory buckets, the AZ ID of the Availability Zone where the bucket + // will be created. An example AZ ID value is usw2-az2. + Name *string `type:"string"` + + // The type of location where the bucket will be created. + Type *string `type:"string" enum:"LocationType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocationInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocationInfo) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *LocationInfo) SetName(v string) *LocationInfo { + s.Name = &v + return s +} + +// SetType sets the Type field's value. +func (s *LocationInfo) SetType(v string) *LocationInfo { + s.Type = &v + return s +} + // Describes where logs are stored and the prefix that Amazon S3 assigns to // all log object keys for a bucket. For more information, see PUT Bucket logging // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) @@ -29058,6 +31675,9 @@ type LoggingEnabled struct { // in the Amazon S3 User Guide. 
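// Example (editor-added, not part of the upstream diff): how the new
// LocationInfo type above is meant to be used when creating a directory bucket.
// This assumes the same SDK release also carries the matching
// CreateBucketConfiguration.Location and BucketInfo fields (added elsewhere in
// this diff); the AZ ID and bucket name are placeholders that must agree, per
// the bucket_base_name--az-id--x-s3 naming rule quoted above.
package examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// createDirectoryBucket creates an S3 Express One Zone bucket in one AZ.
func createDirectoryBucket(svc *s3.S3) error {
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("doc-example-bucket--usw2-az2--x-s3"), // hypothetical
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			Location: &s3.LocationInfo{
				Name: aws.String("usw2-az2"),         // AZ ID, per the doc above
				Type: aws.String("AvailabilityZone"), // LocationType enum value
			},
			Bucket: &s3.BucketInfo{ // assumed companion type from this release
				DataRedundancy: aws.String("SingleAvailabilityZone"), // assumed enum value
				Type:           aws.String("Directory"),              // assumed enum value
			},
		},
	})
	return err
}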
TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + // Amazon S3 key format for log objects. + TargetObjectKeyFormat *TargetObjectKeyFormat `type:"structure"` + // A prefix for all log object keys. If you store log files from multiple Amazon // S3 buckets in a single bucket, you can use a prefix to distinguish which // log files came from which bucket. @@ -29122,6 +31742,12 @@ func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { return s } +// SetTargetObjectKeyFormat sets the TargetObjectKeyFormat field's value. +func (s *LoggingEnabled) SetTargetObjectKeyFormat(v *TargetObjectKeyFormat) *LoggingEnabled { + s.TargetObjectKeyFormat = v + return s +} + // SetTargetPrefix sets the TargetPrefix field's value. func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { s.TargetPrefix = &v @@ -29469,9 +32095,15 @@ type MultipartUpload struct { Key *string `min:"1" type:"string"` // Specifies the owner of the object that is part of the multipart upload. + // + // Directory buckets - The bucket owner is returned as the object owner for + // all the objects. Owner *Owner `type:"structure"` // The class of storage used to store the object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. StorageClass *string `type:"string" enum:"StorageClass"` // Upload ID that identifies the multipart upload. @@ -29546,9 +32178,10 @@ func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { type NoncurrentVersionExpiration struct { _ struct{} `type:"structure"` - // Specifies how many noncurrent versions Amazon S3 will retain. If there are - // this many more recent noncurrent versions, Amazon S3 will take the associated - // action. For more information about noncurrent versions, see Lifecycle configuration + // Specifies how many newer noncurrent versions must exist before Amazon S3 + // can perform the associated action on a given version. If there are this many + // more recent noncurrent versions, Amazon S3 will take the associated action. + // For more information about noncurrent versions, see Lifecycle configuration // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) // in the Amazon S3 User Guide. NewerNoncurrentVersions *int64 `type:"integer"` @@ -29601,9 +32234,10 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` - // Specifies how many noncurrent versions Amazon S3 will retain. If there are - // this many more recent noncurrent versions, Amazon S3 will take the associated - // action. For more information about noncurrent versions, see Lifecycle configuration + // Specifies how many newer noncurrent versions must exist before Amazon S3 + // can perform the associated action on a given version. If there are this many + // more recent noncurrent versions, Amazon S3 will take the associated action. + // For more information about noncurrent versions, see Lifecycle configuration // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) // in the Amazon S3 User Guide. NewerNoncurrentVersions *int64 `type:"integer"` @@ -29873,6 +32507,8 @@ type Object struct { // encryption. If an object is larger than 16 MB, the Amazon Web Services // Management Console will upload or copy that object as a Multipart Upload, // and therefore the ETag will not be an MD5 digest. 
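// Example (editor-added, not part of the upstream diff): a sketch of the
// reworded NewerNoncurrentVersions semantics above — retain up to three newer
// noncurrent versions, and expire a version 30 days after it becomes noncurrent
// once that threshold is exceeded. Bucket name and rule ID are placeholders;
// the empty-prefix filter is assumed to apply the rule bucket-wide.
package examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// expireOldVersions installs a lifecycle rule for noncurrent object versions.
func expireOldVersions(svc *s3.S3) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("my-versioned-bucket"), // hypothetical
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("expire-old-versions"), // hypothetical
				Status: aws.String("Enabled"),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")},
				NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
					NewerNoncurrentVersions: aws.Int64(3),
					NoncurrentDays:          aws.Int64(30),
				},
			}},
		},
	})
	return err
}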
+ // + // Directory buckets - MD5 is not supported by directory buckets. ETag *string `type:"string"` // The name that you assign to an object. You use the object key to retrieve @@ -29883,6 +32519,8 @@ type Object struct { LastModified *time.Time `type:"timestamp"` // The owner of the object + // + // Directory buckets - The bucket owner is returned as the object owner. Owner *Owner `type:"structure"` // Specifies the restoration status of an object. Objects in certain storage @@ -29890,12 +32528,18 @@ type Object struct { // about these storage classes and how to work with archived objects, see Working // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. RestoreStatus *RestoreStatus `type:"structure"` // Size in bytes of the object - Size *int64 `type:"integer"` + Size *int64 `type:"long"` // The class of storage used to store the object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. StorageClass *string `type:"string" enum:"ObjectStorageClass"` } @@ -29978,7 +32622,9 @@ type ObjectIdentifier struct { // Key is a required field Key *string `min:"1" type:"string" required:"true"` - // VersionId for the specific version of the object to delete. + // Version ID for the specific version of the object to delete. + // + // This functionality is not supported for directory buckets. VersionId *string `type:"string"` } @@ -30193,26 +32839,32 @@ type ObjectPart struct { ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. 
For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `type:"string"` @@ -30221,7 +32873,7 @@ type ObjectPart struct { PartNumber *int64 `type:"integer"` // The size of the uploaded part in bytes. - Size *int64 `type:"integer"` + Size *int64 `type:"long"` } // String returns the string representation. @@ -30295,7 +32947,7 @@ type ObjectVersion struct { // The object key. Key *string `min:"1" type:"string"` - // Date and time the object was last modified. + // Date and time when the object was last modified. LastModified *time.Time `type:"timestamp"` // Specifies the owner of the object. @@ -30309,7 +32961,7 @@ type ObjectVersion struct { RestoreStatus *RestoreStatus `type:"structure"` // Size in bytes of the object. - Size *int64 `type:"integer"` + Size *int64 `type:"long"` // The class of storage used to store the object. StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` @@ -30506,6 +33158,8 @@ type Owner struct { // * Europe (Ireland) // // * South America (São Paulo) + // + // This functionality is not supported for directory buckets. DisplayName *string `type:"string"` // Container for the ID of the owner. @@ -30615,8 +33269,19 @@ type OwnershipControlsRule struct { // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that - // don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control - // canned ACL or an equivalent form of this ACL expressed in the XML format. + // don't specify an ACL or specify bucket owner full control ACLs (such as the + // predefined bucket-owner-full-control canned ACL or a custom ACL in XML format + // that grants the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. + // We recommend keeping ACLs disabled, except in uncommon use cases where you + // must control access for each object individually. 
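The BucketOwnerEnforced guidance above ("keep ACLs disabled, except in uncommon use cases") reduces to a one-rule ownership-controls call. A sketch with the AWS SDK for Go v1; bucket and region are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession(&aws.Config{
            Region: aws.String("us-west-2"), // placeholder
        })))

        // Disable ACLs: the bucket owner owns and controls every object.
        _, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
            Bucket: aws.String("my-bucket"), // placeholder
            OwnershipControls: &s3.OwnershipControls{
                Rules: []*s3.OwnershipControlsRule{{
                    ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced),
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }

Once applied, PUT requests that carry any ACL other than bucket-owner-full-control (or no ACL at all) are rejected, as the OwnershipControlsRule doc in this hunk describes.
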
For more information about + // S3 Object Ownership, see Controlling ownership of objects and disabling ACLs + // for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. // // ObjectOwnership is a required field ObjectOwnership *string `type:"string" required:"true" enum:"ObjectOwnership"` @@ -30694,18 +33359,22 @@ type Part struct { ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` @@ -30727,7 +33396,7 @@ type Part struct { PartNumber *int64 `type:"integer"` // Size in bytes of the uploaded part data. - Size *int64 `type:"integer"` + Size *int64 `type:"long"` } // String returns the string representation. @@ -30796,6 +33465,44 @@ func (s *Part) SetSize(v int64) *Part { return s } +// Amazon S3 keys for log objects are partitioned in the following format: +// +// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// +// PartitionedPrefix defaults to EventTime delivery when server access logs +// are delivered. +type PartitionedPrefix struct { + _ struct{} `locationName:"PartitionedPrefix" type:"structure"` + + // Specifies the partition date source for the partitioned prefix. PartitionDateSource + // can be EventTime or DeliveryTime. 
+ PartitionDateSource *string `type:"string" enum:"PartitionDateSource"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PartitionedPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PartitionedPrefix) GoString() string { + return s.String() +} + +// SetPartitionDateSource sets the PartitionDateSource field's value. +func (s *PartitionedPrefix) SetPartitionDateSource(v string) *PartitionedPrefix { + s.PartitionDateSource = &v + return s +} + // The container element for a bucket's policy status. type PolicyStatus struct { _ struct{} `type:"structure"` @@ -31043,12 +33750,12 @@ type PutBucketAccelerateConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -31060,9 +33767,9 @@ type PutBucketAccelerateConfigurationInput struct { // must be populated with the algorithm's checksum of the request payload. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -31197,12 +33904,12 @@ type PutBucketAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. 
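The new PartitionedPrefix type above is consumed through LoggingEnabled.TargetObjectKeyFormat (see SetTargetObjectKeyFormat earlier in this diff). A sketch of enabling date-partitioned server access logs with the AWS SDK for Go v1; bucket names and region are placeholders, and the literal "EventTime" mirrors the PartitionDateSource enum added in this patch:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession(&aws.Config{
            Region: aws.String("us-west-2"), // placeholder
        })))

        // Log keys become:
        // access-logs/<account>/<region>/<bucket>/<YYYY>/<MM>/<DD>/...
        _, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
            Bucket: aws.String("source-bucket"), // placeholder
            BucketLoggingStatus: &s3.BucketLoggingStatus{
                LoggingEnabled: &s3.LoggingEnabled{
                    TargetBucket: aws.String("log-bucket"), // placeholder
                    TargetPrefix: aws.String("access-logs/"),
                    TargetObjectKeyFormat: &s3.TargetObjectKeyFormat{
                        PartitionedPrefix: &s3.PartitionedPrefix{
                            PartitionDateSource: aws.String("EventTime"),
                        },
                    },
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }

Omitting TargetObjectKeyFormat keeps the previous flat key layout, which matches the "defaults to EventTime delivery" note in the PartitionedPrefix doc.
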
When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -31218,9 +33925,9 @@ type PutBucketAclInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Allows grantee the read, write, read ACP, and write ACP permissions on the @@ -31411,9 +34118,9 @@ type PutBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID that identifies the analytics configuration. @@ -31563,12 +34270,12 @@ type PutBucketCorsInput struct { // CORSConfiguration is a required field CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. 
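For PutBucketAclInput above, the simplest form skips the checksum header entirely (the doc notes the Go v1 SDK does not compute payload checksums automatically) and applies a canned ACL. A sketch; bucket and region are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession(&aws.Config{
            Region: aws.String("us-west-2"), // placeholder
        })))

        // Apply the "private" canned ACL; no checksum header is sent,
        // so Amazon S3 computes nothing extra on the SDK side.
        _, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
            Bucket: aws.String("my-bucket"), // placeholder
            ACL:    aws.String(s3.BucketCannedACLPrivate),
        })
        if err != nil {
            log.Fatal(err)
        }
    }
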
When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -31584,9 +34291,9 @@ type PutBucketCorsInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -31727,12 +34434,12 @@ type PutBucketEncryptionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -31748,9 +34455,9 @@ type PutBucketEncryptionInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Specifies the default server-side-encryption configuration. @@ -32028,9 +34735,9 @@ type PutBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. 
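PutBucketEncryptionInput above takes the same checksum and owner headers plus a ServerSideEncryptionConfiguration payload. A minimal SSE-S3 default-encryption sketch with the AWS SDK for Go v1; bucket and region are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession(&aws.Config{
            Region: aws.String("us-west-2"), // placeholder
        })))

        // Encrypt new objects with SSE-S3 (AES256) by default.
        _, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
            Bucket: aws.String("my-bucket"), // placeholder
            ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
                Rules: []*s3.ServerSideEncryptionRule{{
                    ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
                        SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
                    },
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }
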
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the inventory configuration. @@ -32177,12 +34884,12 @@ type PutBucketLifecycleConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -32198,9 +34905,9 @@ type PutBucketLifecycleConfigurationInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for lifecycle rules. You can add as many as 1,000 rules. @@ -32332,12 +35039,12 @@ type PutBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. 
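The reworded ExpectedBucketOwner doc repeated through this diff is a cheap ownership guard: pass the account ID you believe owns the bucket, and a mismatch fails with 403 Forbidden. A sketch with the AWS SDK for Go v1; the account ID, bucket, and region are placeholders, and the "AccessDenied" error-code check is an assumption about how the 403 surfaces through awserr:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession(&aws.Config{
            Region: aws.String("us-west-2"), // placeholder
        })))

        out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
            Bucket:              aws.String("my-bucket"),    // placeholder
            ExpectedBucketOwner: aws.String("111122223333"), // placeholder account ID
        })
        if err != nil {
            // Assumed code for the 403 the docs describe.
            if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "AccessDenied" {
                log.Fatal("bucket is not owned by the expected account")
            }
            log.Fatal(err)
        }
        fmt.Printf("bucket has %d lifecycle rules\n", len(out.Rules))
    }
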
When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -32353,9 +35060,9 @@ type PutBucketLifecycleInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for lifecycle rules. You can add as many as 1000 rules. @@ -32497,12 +35204,12 @@ type PutBucketLoggingInput struct { // BucketLoggingStatus is a required field BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -32518,9 +35225,9 @@ type PutBucketLoggingInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -32654,9 +35361,9 @@ type PutBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the metrics configuration. The ID has a 64 character @@ -32804,9 +35511,9 @@ type PutBucketNotificationConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // A container for specifying the notification configuration of the bucket. @@ -32950,12 +35657,12 @@ type PutBucketNotificationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -32971,9 +35678,9 @@ type PutBucketNotificationInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The container for the configuration. @@ -33107,9 +35814,9 @@ type PutBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) @@ -33240,19 +35947,45 @@ type PutBucketPolicyInput struct { // The name of the bucket. // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon + // S3 fails the request with the HTTP status code 400 Bad Request. + // + // For the x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // + // * CRC32 + // + // * CRC32C + // + // * SHA1 + // + // * SHA256 + // + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. 
+ // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, + // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum + // algorithm that matches the provided value in x-amz-checksum-algorithm . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. // // The AWS SDK for Go v1 does not support automatic computing request payload // checksum. This feature is available in the AWS SDK for Go v2. If a value @@ -33266,15 +35999,24 @@ type PutBucketPolicyInput struct { // Set this parameter to true to confirm that you want to remove your permissions // to change this bucket policy in the future. + // + // This functionality is not supported for directory buckets. ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The bucket policy as a JSON document. // + // For directory buckets, the only IAM action supported in the bucket policy + // is s3express:CreateSession. + // // Policy is a required field Policy *string `type:"string" required:"true"` } @@ -33410,12 +36152,12 @@ type PutBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -33431,9 +36173,9 @@ type PutBucketReplicationInput struct { // to be used. 
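The directory-bucket notes above (path-style s3express-control endpoint, s3express:CreateSession as the only supported IAM action in the bucket policy, no ExpectedBucketOwner header) combine into a policy like the sketch below. The account ID, role name, and zone ID are placeholders; the bucket name follows the documented bucket_base_name--az_id--x-s3 pattern, and the s3express ARN format is an assumption inferred from the naming rules linked in the doc:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession(&aws.Config{
            Region: aws.String("us-west-2"), // placeholder
        })))

        // Only s3express:CreateSession is supported in a directory
        // bucket policy; session credentials then authorize the data plane.
        policy := `{
          "Version": "2012-10-17",
          "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "arn:aws:iam::111122223333:role/app-role"},
            "Action": "s3express:CreateSession",
            "Resource": "arn:aws:s3express:us-west-2:111122223333:bucket/DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"
          }]
        }`

        _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
            Bucket: aws.String("DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"),
            Policy: aws.String(policy),
        })
        if err != nil {
            log.Fatal(err)
        }
    }

Note that, per the hunk above, sending ExpectedBucketOwner against a directory bucket fails with 501 Not Implemented rather than being ignored.
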
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // A container for replication rules. You can add up to 1,000 rules. The maximum @@ -33582,12 +36324,12 @@ type PutBucketRequestPaymentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -33603,9 +36345,9 @@ type PutBucketRequestPaymentInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for Payer. @@ -33744,12 +36486,12 @@ type PutBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. 
For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -33765,9 +36507,9 @@ type PutBucketTaggingInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for the TagSet and Tag elements. @@ -33906,12 +36648,12 @@ type PutBucketVersioningInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -33927,9 +36669,9 @@ type PutBucketVersioningInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The concatenation of the authentication device's serial number, a space, @@ -34073,12 +36815,12 @@ type PutBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -34094,9 +36836,9 @@ type PutBucketWebsiteInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for the request. @@ -34240,22 +36982,33 @@ type PutObjectAclInput struct { // The bucket name that contains the object to which you want to attach the // ACL. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. 
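The MFA field whose doc begins above ("the concatenation of the authentication device's serial number, a space," and the current token) looks like this in practice. A sketch enabling MFA delete with the AWS SDK for Go v1; the device ARN, token, bucket, and region are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession(&aws.Config{
            Region: aws.String("us-west-2"), // placeholder
        })))

        _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
            Bucket: aws.String("my-bucket"), // placeholder
            // "<device serial or ARN> <current code>"; both values are placeholders.
            MFA: aws.String("arn:aws:iam::111122223333:mfa/root-device 123456"),
            VersioningConfiguration: &s3.VersioningConfiguration{
                MFADelete: aws.String(s3.MFADeleteEnabled),
                Status:    aws.String(s3.BucketVersioningStatusEnabled),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }
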
The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -34271,25 +37024,25 @@ type PutObjectAclInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Allows grantee the read, write, read ACP, and write ACP permissions on the // bucket. // - // This action is not supported by Amazon S3 on Outposts. + // This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to list the objects in the bucket. // - // This action is not supported by Amazon S3 on Outposts. + // This functionality is not supported for Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the bucket ACL. // - // This action is not supported by Amazon S3 on Outposts. + // This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to create new objects in the bucket. @@ -34300,37 +37053,28 @@ type PutObjectAclInput struct { // Allows grantee to write the ACL for the applicable bucket. 
// - // This action is not supported by Amazon S3 on Outposts. + // This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Key for which the PUT action was initiated. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -34496,6 +37240,8 @@ type PutObjectAclOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -34527,9 +37273,32 @@ type PutObjectInput struct { _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` // The canned ACL to apply to the object. For more information, see Canned ACL - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. // - // This action is not supported by Amazon S3 on Outposts. 
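Tying together the PutObjectAclInput fields above (a canned ACL, VersionId, RequestPayer): a sketch with the AWS SDK for Go v1; the bucket, key, and version ID are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession(&aws.Config{
            Region: aws.String("us-west-2"), // placeholder
        })))

        // Re-ACL one specific version of an object in a Requester Pays bucket.
        _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
            Bucket:       aws.String("my-bucket"),  // placeholder
            Key:          aws.String("report.csv"), // placeholder
            VersionId:    aws.String("EXAMPLE-VERSION-ID"), // placeholder
            ACL:          aws.String(s3.ObjectCannedACLBucketOwnerFullControl),
            RequestPayer: aws.String(s3.RequestPayerRequester),
        })
        if err != nil {
            log.Fatal(err)
        }
    }

As the hunk notes, VersionId and RequestPayer are both no-ops for directory buckets, where this functionality is not supported.
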
+ // When adding a new object, you can use headers to grant ACL-based permissions + // to individual Amazon Web Services accounts or to predefined groups defined + // by Amazon S3. These permissions are then added to the ACL on the object. + // By default, all objects are private. Only the owner has full access control. + // For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) + // in the Amazon S3 User Guide. + // + // If the bucket that you're uploading objects to uses the bucket owner enforced + // setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. + // Buckets that use this setting only accept PUT requests that don't specify + // an ACL or PUT requests that specify bucket owner full control ACLs, such + // as the bucket-owner-full-control canned ACL or an equivalent form of this + // ACL expressed in the XML format. PUT requests that contain other ACLs (for + // example, custom grants to certain Amazon Web Services accounts) fail and + // return a 400 error with the error code AccessControlListNotSupported. For + // more information, see Controlling ownership of objects and disabling ACLs + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // Object data. @@ -34537,19 +37306,33 @@ type PutObjectInput struct { // The bucket name to which the PUT action was initiated. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -34562,6 +37345,8 @@ type PutObjectInput struct { // // Specifying this header with a PUT action doesn’t affect bucket-level settings // for S3 Bucket Key. + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Can be used to specify caching behavior along the request/reply chain. For @@ -34569,16 +37354,33 @@ type PutObjectInput struct { // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon + // S3 fails the request with the HTTP status code 400 Bad Request. + // + // For the x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // + // * CRC32 + // + // * CRC32C + // + // * SHA1 + // + // * SHA256 + // + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. 
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm,
+ // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum
+ // algorithm that matches the provided value in x-amz-checksum-algorithm.
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
 //
 // The AWS SDK for Go v1 does not support automatic computing request payload
 // checksum. This feature is available in the AWS SDK for Go v2. If a value
@@ -34638,15 +37440,22 @@ type PutObjectInput struct {
 // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end
 // integrity check. For more information about REST request authentication,
 // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+ //
+ // The Content-MD5 header is required for any request to upload an object with
+ // a retention period configured using Amazon S3 Object Lock. For more information
+ // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
 ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`

 // A standard MIME type describing the format of the contents. For more information,
 // see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type).
 ContentType *string `location:"header" locationName:"Content-Type" type:"string"`

- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
 ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`

 // The date and time at which the object is no longer cacheable. For more information,
@@ -34655,22 +37464,30 @@ type PutObjectInput struct {
 // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
 //
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`

 // Allows grantee to read the object data and its metadata.
 //
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
 GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`

 // Allows grantee to read the object ACL.
 //
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
 GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`

 // Allows grantee to write the ACL for the applicable object.
// - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Object key for which the PUT action was initiated. @@ -34682,25 +37499,37 @@ type PutObjectInput struct { Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // Specifies whether a legal hold will be applied to this object. For more information - // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` // The Object Lock mode that you want to apply to this object. + // + // This functionality is not supported for directory buckets. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // The date and time when you want this object's Object Lock to expire. Must // be formatted as a timestamp parameter. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -34709,6 +37538,8 @@ type PutObjectInput struct { // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutObjectInput's // String and GoString methods. @@ -34717,13 +37548,18 @@ type PutObjectInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
 // Amazon S3 uses this header for a message integrity check to ensure that the
 // encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

 // Specifies the Amazon Web Services KMS Encryption Context to use for object
 // encryption. The value of this header is a base64-encoded UTF-8 string holding
 // JSON with the encryption context key-value pairs. This value is stored as
 // object metadata and automatically gets passed on to Amazon Web Services KMS
- // for future GetObject or CopyObject operations on this object.
+ // for future GetObject or CopyObject operations on this object. This value
+ // must be explicitly added during CopyObject operations.
+ //
+ // This functionality is not supported for directory buckets.
 //
 // SSEKMSEncryptionContext is a sensitive parameter and its value will be
 // replaced with "sensitive" in string returned by PutObjectInput's
@@ -34731,39 +37567,62 @@ type PutObjectInput struct {
 SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`

 // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse,
- // this header specifies the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. If you specify
- // x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse,
+ // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management
+ // Service (KMS) symmetric encryption customer managed key that was used for
+ // the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse,
 // but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3
 // uses the Amazon Web Services managed key (aws/s3) to protect the data. If
 // the KMS key does not exist in the same account that's issuing the command,
 // you must use the full ARN and not just the ID.
 //
+ // This functionality is not supported for directory buckets.
+ //
 // SSEKMSKeyId is a sensitive parameter and its value will be
 // replaced with "sensitive" in string returned by PutObjectInput's
 // String and GoString methods.
 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`

- // The server-side encryption algorithm used when storing this object in Amazon
- // S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ // The server-side encryption algorithm used when you store this object in Amazon
+ // S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ //
+ // General purpose buckets - You have four mutually exclusive options to protect
+ // data using server-side encryption in Amazon S3, depending on how you choose
+ // to manage the encryption keys. Specifically, the encryption key options are
+ // Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or
+ // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with
+ // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default.
+ // You can optionally tell Amazon S3 to encrypt data at rest by using server-side
+ // encryption with other key options.
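A short sketch of the SSE-KMS request fields documented above; the KMS key ARN and the encryption-context pairs are placeholder assumptions, and leaving SSEKMSKeyId unset would fall back to the aws/s3 managed key:

```go
package s3examples

import (
	"bytes"
	"encoding/base64"
	"encoding/json"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// putWithSSEKMS uploads an object encrypted under a customer managed KMS key.
func putWithSSEKMS(svc *s3.S3, bucket, key, kmsKeyARN string, body []byte) error {
	// The encryption context header is base64-encoded JSON key-value pairs.
	ctxJSON, err := json.Marshal(map[string]string{"purpose": "demo"}) // placeholder pairs
	if err != nil {
		return err
	}
	_, err = svc.PutObject(&s3.PutObjectInput{
		Bucket:                  aws.String(bucket),
		Key:                     aws.String(key),
		Body:                    bytes.NewReader(body),
		ServerSideEncryption:    aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:             aws.String(kmsKeyARN),
		SSEKMSEncryptionContext: aws.String(base64.StdEncoding.EncodeToString(ctxJSON)),
	})
	return err
}
```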
For more information, see Using Server-Side + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + // in the Amazon S3 User Guide. + // + // Directory buckets - For directory buckets, only the server-side encryption + // with Amazon S3 managed keys (SSE-S3) (AES256) value is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high availability. // Depending on performance needs, you can specify a different Storage Class. - // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, - // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 User Guide. + // + // * For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // + // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. // (For example, "Key1=Value1") + // + // This functionality is not supported for directory buckets. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. For information about object - // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) + // in the Amazon S3 User Guide. // // In the following example, the request header sets the redirect to an object // (anotherPage.html) in the same bucket: @@ -34777,7 +37636,10 @@ type PutObjectInput struct { // // For more information about website hosting in Amazon S3, see Hosting Websites // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) - // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -35090,8 +37952,10 @@ type PutObjectLegalHoldInput struct { // The bucket name containing the object that you want to place a legal hold // on. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. 
When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) @@ -35100,12 +37964,12 @@ type PutObjectLegalHoldInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -35121,9 +37985,9 @@ type PutObjectLegalHoldInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key name for the object that you want to place a legal hold on. @@ -35136,10 +38000,14 @@ type PutObjectLegalHoldInput struct { LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. 
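Tying the PutObjectLegalHoldInput fields together, a minimal sketch; bucket and key are placeholders and the bucket must have Object Lock enabled:

```go
package s3examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// placeLegalHold turns on a legal hold for the current object version.
func placeLegalHold(svc *s3.S3, bucket, key string) error {
	_, err := svc.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		LegalHold: &s3.ObjectLockLegalHold{
			Status: aws.String(s3.ObjectLockLegalHoldStatusOn),
		},
	})
	return err
}
```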
+ // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID of the object that you want to place a legal hold on. @@ -35267,6 +38135,8 @@ type PutObjectLegalHoldOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -35302,12 +38172,12 @@ type PutObjectLockConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -35323,19 +38193,23 @@ type PutObjectLockConfigurationInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The Object Lock configuration that you want to apply to the specified bucket. ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // A token to allow Object Lock to be enabled for an existing bucket. @@ -35451,6 +38325,8 @@ type PutObjectLockConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -35483,89 +38359,133 @@ type PutObjectOutput struct { // Indicates whether the uploaded object uses an S3 Bucket Key for server-side // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. 
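Stepping back to the PutObjectLockConfigurationInput documented above, a minimal sketch that sets a bucket-level default retention; the 30-day GOVERNANCE rule is illustrative, and the bucket must have been created with Object Lock enabled:

```go
package s3examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// setDefaultRetention applies a 30-day GOVERNANCE default to new objects.
func setDefaultRetention(svc *s3.S3, bucket string) error {
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String(bucket),
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30),
				},
			},
		},
	})
	return err
}
```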
With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` // Entity tag for the uploaded object. + // + // General purpose buckets - To ensure that data is not corrupted traversing + // the network, for objects where the ETag is the MD5 digest of the object, + // you can calculate the MD5 while putting an object to Amazon S3 and compare + // the returned ETag to the calculated MD5 value. + // + // Directory buckets - The ETag for the object in a directory bucket isn't the + // MD5 digest of the object. ETag *string `location:"header" locationName:"ETag" type:"string"` // If the expiration is configured for the object (see PutBucketLifecycleConfiguration - // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), - // the response includes this header. It includes the expiry-date and rule-id - // key-value pairs that provide information about object expiration. The value - // of the rule-id is URL-encoded. + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)) + // in the Amazon S3 User Guide, the response includes this header. It includes + // the expiry-date and rule-id key-value pairs that provide information about + // object expiration. The value of the rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // If present, indicates that the requester was successfully charged for the // request. 
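The ETag note above suggests a cheap end-to-end check for general purpose buckets: for a single-part, non-KMS upload, the returned ETag is the quoted hex MD5 of the payload. A sketch under exactly those assumptions (it does not hold for multipart uploads or directory buckets):

```go
package s3examples

import (
	"bytes"
	"crypto/md5"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// putAndVerifyETag compares the returned ETag against a locally computed MD5.
func putAndVerifyETag(svc *s3.S3, bucket, key string, body []byte) error {
	out, err := svc.PutObject(&s3.PutObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Body:   bytes.NewReader(body),
	})
	if err != nil {
		return err
	}
	// S3 returns the ETag wrapped in double quotes.
	want := `"` + fmt.Sprintf("%x", md5.Sum(body)) + `"`
	if got := aws.StringValue(out.ETag); got != want {
		return fmt.Errorf("etag mismatch: got %s, want %s", got, want)
	}
	return nil
}
```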
+ // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the Amazon Web Services KMS Encryption Context to use + // If present, indicates the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 // string holding JSON with the encryption context key-value pairs. This value // is stored as object metadata and automatically gets passed on to Amazon Web // Services KMS for future GetObject or CopyObject operations on this object. // + // This functionality is not supported for directory buckets. + // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutObjectOutput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, - // this header specifies the ID of the Key Management Service (KMS) symmetric + // this header indicates the ID of the Key Management Service (KMS) symmetric // encryption customer managed key that was used for the object. // + // This functionality is not supported for directory buckets. + // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutObjectOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon + // The server-side encryption algorithm used when you store this object in Amazon // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // Version of the object. + // Version ID of the object. + // + // If you enable versioning for a bucket, Amazon S3 automatically generates + // a unique version ID for the object being stored. Amazon S3 returns this ID + // in the response. 
When you enable versioning for a bucket, if Amazon S3 receives + // multiple write requests for the same object simultaneously, it stores all + // of the objects. For more information about versioning, see Adding Objects + // to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) + // in the Amazon S3 User Guide. For information about returning the versioning + // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -35677,8 +38597,10 @@ type PutObjectRetentionInput struct { // The bucket name that contains the object you want to apply this Object Retention // configuration to. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) @@ -35690,12 +38612,12 @@ type PutObjectRetentionInput struct { // Indicates whether this action should bypass Governance-mode restrictions. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -35711,9 +38633,9 @@ type PutObjectRetentionInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. 
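For PutObjectRetentionInput, a minimal sketch that locks the current object version under GOVERNANCE mode; the 90-day period and names are placeholders:

```go
package s3examples

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// retainFor90Days sets a GOVERNANCE-mode retention on the current version.
func retainFor90Days(svc *s3.S3, bucket, key string) error {
	_, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Retention: &s3.ObjectLockRetention{
			Mode:            aws.String(s3.ObjectLockRetentionModeGovernance),
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 90)),
		},
	})
	return err
}
```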
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key name for the object that you want to apply this Object Retention @@ -35723,10 +38645,14 @@ type PutObjectRetentionInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The container element for the Object Retention configuration. @@ -35864,6 +38790,8 @@ type PutObjectRetentionOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -35896,30 +38824,33 @@ type PutObjectTaggingInput struct { // The bucket name containing the object. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? 
(https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -35935,9 +38866,9 @@ type PutObjectTaggingInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Name of the object key. @@ -35946,10 +38877,14 @@ type PutObjectTaggingInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. 
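A minimal sketch for PutObjectTaggingInput as documented here; the tag pair is a placeholder. Note that the request replaces the object's entire tag-set:

```go
package s3examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// tagObject replaces the object's tag-set with a single placeholder tag.
func tagObject(svc *s3.S3, bucket, key string) error {
	_, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("project"), Value: aws.String("demo")},
			},
		},
	})
	return err
}
```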
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Container for the TagSet and Tag elements @@ -36125,12 +39060,12 @@ type PutPublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -36146,9 +39081,9 @@ type PutPublicAccessBlockInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The PublicAccessBlock configuration that you want to apply to this Amazon @@ -37283,30 +40218,33 @@ type RestoreObjectInput struct { // The bucket name containing the object to restore. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. 
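Returning to PutPublicAccessBlockInput above, a sketch that enables all four public-access restrictions; the bucket name is a placeholder:

```go
package s3examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// blockAllPublicAccess enables every public-access restriction on the bucket.
func blockAllPublicAccess(svc *s3.S3, bucket string) error {
	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String(bucket),
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	return err
}
```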
For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -37318,9 +40256,9 @@ type RestoreObjectInput struct { // must be populated with the algorithm's checksum of the request payload. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which the action was initiated. 
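For RestoreObjectInput, a minimal sketch that restores an archived object for seven days using the Standard retrieval tier; names and values are illustrative:

```go
package s3examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// restoreArchived makes an archived object readable for 7 days.
func restoreArchived(svc *s3.S3, bucket, key string) error {
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(7),
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard),
			},
		},
	})
	return err
}
```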
@@ -37329,10 +40267,14 @@ type RestoreObjectInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Container for restore job parameters. @@ -37468,6 +40410,8 @@ type RestoreObjectOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // Indicates the path in the provided S3 output location where Select results @@ -37626,6 +40570,9 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { // about these storage classes and how to work with archived objects, see Working // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) // in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. Only the S3 Express +// One Zone storage class is supported by directory buckets to store objects. type RestoreStatus struct { _ struct{} `type:"structure"` @@ -38193,9 +41140,9 @@ type SelectObjectContentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The expression that is used to query the object. @@ -38574,7 +41521,15 @@ type ServerSideEncryptionByDefault struct { // KMS key ID to use for the default encryption. This parameter is allowed if // and only if SSEAlgorithm is set to aws:kms. // - // You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. + // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) + // of the KMS key. + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key Alias: alias/alias-name + // // If you use a key ID, you can run into a LogDestination undeliverable error // when creating a VPC flow log. 
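To exercise ServerSideEncryptionByDefault below, a sketch that makes SSE-KMS the bucket default through PutBucketEncryption; the key ARN is a placeholder, and a fully qualified ARN is required for cross-account use:

```go
package s3examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// defaultToSSEKMS makes SSE-KMS with the given key the bucket default and
// turns on S3 Bucket Keys to reduce KMS request costs.
func defaultToSSEKMS(svc *s3.S3, bucket, kmsKeyARN string) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String(bucket),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String(kmsKeyARN),
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	return err
}
```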
// @@ -38582,10 +41537,6 @@ type ServerSideEncryptionByDefault struct { // operations you must use a fully qualified KMS key ARN. For more information, // see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // // Amazon S3 only supports symmetric encryption KMS keys. For more information, // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. @@ -38766,6 +41717,118 @@ func (s *ServerSideEncryptionRule) SetBucketKeyEnabled(v bool) *ServerSideEncryp return s } +// The established temporary security credentials of the session. +// +// Directory buckets - These session credentials are only supported for the +// authentication and authorization of Zonal endpoint APIs on directory buckets. +type SessionCredentials struct { + _ struct{} `type:"structure"` + + // A unique identifier that's associated with a secret access key. The access + // key ID and the secret access key are used together to sign programmatic Amazon + // Web Services requests cryptographically. + // + // AccessKeyId is a required field + AccessKeyId *string `locationName:"AccessKeyId" type:"string" required:"true"` + + // Temporary security credentials expire after a specified interval. After temporary + // credentials expire, any calls that you make with those credentials will fail. + // So you must generate a new set of temporary credentials. Temporary credentials + // cannot be extended or refreshed beyond the original specified interval. + // + // Expiration is a required field + Expiration *time.Time `locationName:"Expiration" type:"timestamp" required:"true"` + + // A key that's used with the access key ID to cryptographically sign programmatic + // Amazon Web Services requests. Signing a request identifies the sender and + // prevents the request from being altered. + // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SessionCredentials's + // String and GoString methods. + // + // SecretAccessKey is a required field + SecretAccessKey *string `locationName:"SecretAccessKey" type:"string" required:"true" sensitive:"true"` + + // A part of the temporary security credentials. The session token is used to + // validate the temporary security credentials. + // + // SessionToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SessionCredentials's + // String and GoString methods. + // + // SessionToken is a required field + SessionToken *string `locationName:"SessionToken" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SessionCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
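SessionCredentials values like these are returned by the directory-bucket CreateSession API. A minimal sketch, assuming an SDK version recent enough to include CreateSession; the directory bucket name is a placeholder:

```go
package s3examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// createSession obtains temporary credentials scoped to one directory bucket.
func createSession(svc *s3.S3) error {
	out, err := svc.CreateSession(&s3.CreateSessionInput{
		Bucket: aws.String("amzn-s3-demo-bucket--usw2-az1--x-s3"), // placeholder
		// ReadWrite is the default session mode; ReadOnly is the alternative.
		SessionMode: aws.String(s3.SessionModeReadWrite),
	})
	if err != nil {
		return err
	}
	creds := out.Credentials // *s3.SessionCredentials
	fmt.Println("access key:", aws.StringValue(creds.AccessKeyId),
		"expires:", aws.TimeValue(creds.Expiration))
	return nil
}
```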
The member name will be present, but the +// value will be replaced with "sensitive". +func (s SessionCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *SessionCredentials) SetAccessKeyId(v string) *SessionCredentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *SessionCredentials) SetExpiration(v time.Time) *SessionCredentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *SessionCredentials) SetSecretAccessKey(v string) *SessionCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *SessionCredentials) SetSessionToken(v string) *SessionCredentials { + s.SessionToken = &v + return s +} + +// To use simple format for S3 keys for log objects, set SimplePrefix to an +// empty object. +// +// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +type SimplePrefix struct { + _ struct{} `locationName:"SimplePrefix" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SimplePrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SimplePrefix) GoString() string { + return s.String() +} + // A container that describes additional filters for identifying the source // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter @@ -39302,6 +42365,49 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { return s } +// Amazon S3 key format for log objects. Only one format, PartitionedPrefix +// or SimplePrefix, is allowed. +type TargetObjectKeyFormat struct { + _ struct{} `type:"structure"` + + // Partitioned S3 key for log objects. + PartitionedPrefix *PartitionedPrefix `locationName:"PartitionedPrefix" type:"structure"` + + // To use the simple format for S3 keys for log objects. To specify SimplePrefix + // format, set SimplePrefix to {}. + SimplePrefix *SimplePrefix `locationName:"SimplePrefix" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TargetObjectKeyFormat) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TargetObjectKeyFormat) GoString() string { + return s.String() +} + +// SetPartitionedPrefix sets the PartitionedPrefix field's value. +func (s *TargetObjectKeyFormat) SetPartitionedPrefix(v *PartitionedPrefix) *TargetObjectKeyFormat { + s.PartitionedPrefix = v + return s +} + +// SetSimplePrefix sets the SimplePrefix field's value. 
+func (s *TargetObjectKeyFormat) SetSimplePrefix(v *SimplePrefix) *TargetObjectKeyFormat { + s.SimplePrefix = v + return s +} + // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access // tier, without additional operational overhead. @@ -39586,19 +42692,33 @@ type UploadPartCopyInput struct { // The bucket name. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -39620,34 +42740,81 @@ type UploadPartCopyInput struct { // my-access-point owned by account 123456789012 in Region us-west-2, use // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. 
// The value must be URL encoded. Amazon S3 supports copy operations using
- // access points only when the source and destination buckets are in the
- // same Amazon Web Services Region. Alternatively, for objects accessed through
- // Amazon S3 on Outposts, specify the ARN of the object as accessed in the
- // format arn:aws:s3-outposts:::outpost//object/.
+ // Access points only when the source and destination buckets are in the
+ // same Amazon Web Services Region. Access points are not supported by directory
+ // buckets. Alternatively, for objects accessed through Amazon S3 on Outposts,
+ // specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/.
// For example, to copy the object reports/january.pdf through outpost my-outpost
// owned by account 123456789012 in Region us-west-2, use the URL encoding
// of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
// The value must be URL-encoded.
//
- // To copy a specific version of an object, append ?versionId= to
- // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
- // If you don't specify a version ID, Amazon S3 copies the latest version of
- // the source object.
+ // If your bucket has versioning enabled, you could have multiple versions of
+ // the same object. By default, x-amz-copy-source identifies the current version
+ // of the source object to copy. To copy a specific version of the source object,
+ // append ?versionId= to the x-amz-copy-source request
+ // header (for example, x-amz-copy-source: /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ //
+ // If the current version is a delete marker and you don't specify a versionId
+ // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found
+ // error, because the object does not exist. If you specify versionId in the
+ // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns
+ // an HTTP 400 Bad Request error, because you are not allowed to specify a delete
+ // marker as a version for the x-amz-copy-source.
+ //
+ // Directory buckets - S3 Versioning isn't enabled or supported for directory
+ // buckets.
//
// CopySource is a required field
CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`

// Copies the object if its entity tag (ETag) matches the specified tag.
+ //
+ // If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request as follows:
+ //
+ // x-amz-copy-source-if-match condition evaluates to true, and;
+ //
+ // x-amz-copy-source-if-unmodified-since condition evaluates to false;
+ //
+ // Amazon S3 returns 200 OK and copies the data.
CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`

// Copies the object if it has been modified since the specified time.
+ //
+ // If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+ // headers are present in the request as follows:
+ //
+ // x-amz-copy-source-if-none-match condition evaluates to false, and;
+ //
+ // x-amz-copy-source-if-modified-since condition evaluates to true;
+ //
+ // Amazon S3 returns 412 Precondition Failed response code.
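A minimal sketch of how these conditional-copy headers map onto the struct fields (illustrative, not part of the vendored SDK code); svc (*s3.S3), uploadID, sourceETag, and the DOC-EXAMPLE bucket and key names are assumed placeholders.

    // Copy part 1 only while the source object is unchanged: if CopySourceIfMatch
    // evaluates to true and CopySourceIfUnmodifiedSince evaluates to false,
    // Amazon S3 returns 200 OK and copies the data, per the comments above.
    out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
        Bucket:            aws.String("DOC-EXAMPLE-BUCKET"),
        Key:               aws.String("large-object"),
        UploadId:          aws.String(uploadID), // from CreateMultipartUpload
        PartNumber:        aws.Int64(1),
        CopySource:        aws.String("DOC-EXAMPLE-SOURCE-BUCKET/large-object"),
        CopySourceIfMatch: aws.String(sourceETag),
    })
    if err != nil {
        // A stale ETag surfaces as 412 Precondition Failed.
    }
    _ = out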
CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` // Copies the object if its entity tag (ETag) is different than the specified // ETag. + // + // If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request as follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false, and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true; + // + // Amazon S3 returns 412 Precondition Failed response code. CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` // Copies the object if it hasn't been modified since the specified time. + // + // If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request as follows: + // + // x-amz-copy-source-if-match condition evaluates to true, and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false; + // + // Amazon S3 returns 200 OK and copies the data. CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` // The range of bytes to copy from the source object. The range value must use @@ -39659,12 +42826,18 @@ type UploadPartCopyInput struct { // Specifies the algorithm to use when decrypting the source object (for example, // AES256). + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. // + // This functionality is not supported when the source object is in a directory + // bucket. + // // CopySourceSSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UploadPartCopyInput's // String and GoString methods. @@ -39673,16 +42846,19 @@ type UploadPartCopyInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - // The account ID of the expected destination bucket owner. If the destination - // bucket is owned by a different account, the request fails with the HTTP status - // code 403 Forbidden (access denied). + // The account ID of the expected destination bucket owner. If the account ID + // that you provide does not match the actual owner of the destination bucket, + // the request fails with the HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The account ID of the expected source bucket owner. If the source bucket - // is owned by a different account, the request fails with the HTTP status code - // 403 Forbidden (access denied). + // The account ID of the expected source bucket owner. 
If the account ID that + // you provide does not match the actual owner of the source bucket, the request + // fails with the HTTP status code 403 Forbidden (access denied). ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` // Object key for which the multipart upload was initiated. @@ -39697,14 +42873,20 @@ type UploadPartCopyInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -39714,6 +42896,9 @@ type UploadPartCopyInput struct { // header. This must be the same encryption key specified in the initiate multipart // upload request. // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UploadPartCopyInput's // String and GoString methods. @@ -39722,6 +42907,9 @@ type UploadPartCopyInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being copied. @@ -39946,6 +43134,8 @@ type UploadPartCopyOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Container for all response elements. @@ -39953,32 +43143,46 @@ type UploadPartCopyOutput struct { // The version of the source object that was copied, if you have enabled versioning // on the source bucket. 
+ // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Key Management Service (KMS) symmetric + // If present, indicates the ID of the Key Management Service (KMS) symmetric // encryption customer managed key that was used for the object. // + // This functionality is not supported for directory buckets. + // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UploadPartCopyOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon + // The server-side encryption algorithm used when you store this object in Amazon // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } @@ -40056,30 +43260,44 @@ type UploadPartInput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. 
+ // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // you use this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -40129,11 +43347,13 @@ type UploadPartInput struct { // The base64-encoded 128-bit MD5 digest of the part data. This parameter is // auto-populated when using the command from the CLI. This parameter is required // if object lock parameters are specified. 
+ // + // This functionality is not supported for directory buckets. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which the multipart upload was initiated. @@ -40148,14 +43368,19 @@ type UploadPartInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -40165,6 +43390,8 @@ type UploadPartInput struct { // header. This must be the same encryption key specified in the initiate multipart // upload request. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UploadPartInput's // String and GoString methods. @@ -40173,6 +43400,8 @@ type UploadPartInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being uploaded. @@ -40375,37 +43604,47 @@ type UploadPartOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. 
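A short sketch exercising the UploadPartInput fields documented above (illustrative, not part of the vendored SDK code); the Body field, the bytes import, and the svc/uploadID/partData/customerKey variables are assumptions not shown in this excerpt.

    part, err := svc.UploadPart(&s3.UploadPartInput{
        Bucket:            aws.String("DOC-EXAMPLE-BUCKET"), // placeholder
        Key:               aws.String("large-object"),
        UploadId:          aws.String(uploadID),
        PartNumber:        aws.Int64(2),
        Body:              bytes.NewReader(partData), // Body field assumed from the full struct
        ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmCrc32),
        // SSE-C headers; not supported for directory buckets, per the comments above.
        SSECustomerAlgorithm: aws.String("AES256"),
        SSECustomerKey:       aws.String(customerKey),
    })
    _, _ = part, err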
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. 
With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` @@ -40414,28 +43653,39 @@ type UploadPartOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key was used for the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UploadPartOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon + // The server-side encryption algorithm used when you store this object in Amazon // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } @@ -40819,6 +44069,8 @@ type WriteGetObjectResponseInput struct { // If present, indicates that the requester was successfully charged for the // request. 
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-fwd-header-x-amz-request-charged" type:"string" enum:"RequestCharged"`

// Route prefix to the HTTP URL generated.
@@ -40845,9 +44097,9 @@ type WriteGetObjectResponseInput struct {
// server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html).
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"`

- // If present, specifies the ID of the Amazon Web Services Key Management Service
- // (Amazon Web Services KMS) symmetric encryption customer managed key that
- // was used for stored in Amazon S3 object.
+ // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon
+ // Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption
+ // customer managed key that was used for the object stored in Amazon S3.
//
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by WriteGetObjectResponseInput's
@@ -41295,6 +44547,9 @@ const (
// BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value
BucketLocationConstraintApSouth1 = "ap-south-1"

+ // BucketLocationConstraintApSouth2 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSouth2 = "ap-south-2"
+
// BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value
BucketLocationConstraintApSoutheast1 = "ap-southeast-1"
@@ -41325,6 +44580,9 @@ const (
// BucketLocationConstraintEuSouth1 is a BucketLocationConstraint enum value
BucketLocationConstraintEuSouth1 = "eu-south-1"

+ // BucketLocationConstraintEuSouth2 is a BucketLocationConstraint enum value
+ BucketLocationConstraintEuSouth2 = "eu-south-2"
+
// BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value
BucketLocationConstraintEuWest1 = "eu-west-1"
@@ -41354,12 +44612,6 @@ const (
// BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value
BucketLocationConstraintUsWest2 = "us-west-2"
-
- // BucketLocationConstraintApSouth2 is a BucketLocationConstraint enum value
- BucketLocationConstraintApSouth2 = "ap-south-2"
-
- // BucketLocationConstraintEuSouth2 is a BucketLocationConstraint enum value
- BucketLocationConstraintEuSouth2 = "eu-south-2"
)

// BucketLocationConstraint_Values returns all elements of the BucketLocationConstraint enum
@@ -41371,6 +44623,7 @@ func BucketLocationConstraint_Values() []string {
BucketLocationConstraintApNortheast2,
BucketLocationConstraintApNortheast3,
BucketLocationConstraintApSouth1,
+ BucketLocationConstraintApSouth2,
BucketLocationConstraintApSoutheast1,
BucketLocationConstraintApSoutheast2,
BucketLocationConstraintApSoutheast3,
@@ -41381,6 +44634,7 @@ func BucketLocationConstraint_Values() []string {
BucketLocationConstraintEuCentral1,
BucketLocationConstraintEuNorth1,
BucketLocationConstraintEuSouth1,
+ BucketLocationConstraintEuSouth2,
BucketLocationConstraintEuWest1,
BucketLocationConstraintEuWest2,
BucketLocationConstraintEuWest3,
@@ -41391,8 +44645,6 @@ func BucketLocationConstraint_Values() []string {
BucketLocationConstraintUsGovWest1,
BucketLocationConstraintUsWest1,
BucketLocationConstraintUsWest2,
- BucketLocationConstraintApSouth2,
- BucketLocationConstraintEuSouth2,
}
}
@@ -41416,6 +44668,18 @@ func BucketLogsPermission_Values() []string {
}
}

+const (
+ //
BucketTypeDirectory is a BucketType enum value + BucketTypeDirectory = "Directory" +) + +// BucketType_Values returns all elements of the BucketType enum +func BucketType_Values() []string { + return []string{ + BucketTypeDirectory, + } +} + const ( // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value BucketVersioningStatusEnabled = "Enabled" @@ -41488,6 +44752,18 @@ func CompressionType_Values() []string { } } +const ( + // DataRedundancySingleAvailabilityZone is a DataRedundancy enum value + DataRedundancySingleAvailabilityZone = "SingleAvailabilityZone" +) + +// DataRedundancy_Values returns all elements of the DataRedundancy enum +func DataRedundancy_Values() []string { + return []string{ + DataRedundancySingleAvailabilityZone, + } +} + const ( // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value DeleteMarkerReplicationStatusEnabled = "Enabled" @@ -41887,6 +45163,18 @@ func JSONType_Values() []string { } } +const ( + // LocationTypeAvailabilityZone is a LocationType enum value + LocationTypeAvailabilityZone = "AvailabilityZone" +) + +// LocationType_Values returns all elements of the LocationType enum +func LocationType_Values() []string { + return []string{ + LocationTypeAvailabilityZone, + } +} + const ( // MFADeleteEnabled is a MFADelete enum value MFADeleteEnabled = "Enabled" @@ -42087,8 +45375,19 @@ func ObjectLockRetentionMode_Values() []string { // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that -// don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control -// canned ACL or an equivalent form of this ACL expressed in the XML format. +// don't specify an ACL or specify bucket owner full control ACLs (such as the +// predefined bucket-owner-full-control canned ACL or a custom ACL in XML format +// that grants the same permissions). +// +// By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. +// We recommend keeping ACLs disabled, except in uncommon use cases where you +// must control access for each object individually. For more information about +// S3 Object Ownership, see Controlling ownership of objects and disabling ACLs +// for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. Directory buckets +// use the bucket owner enforced setting for S3 Object Ownership. 
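A sketch of creating a bucket with the recommended BucketOwnerEnforced setting described above (illustrative, not part of the vendored SDK code); the ObjectOwnership field on CreateBucketInput and the ObjectOwnershipBucketOwnerEnforced constant are assumed from the wider SDK, since this excerpt shows only part of the enum.

    _, err := svc.CreateBucket(&s3.CreateBucketInput{
        Bucket: aws.String("DOC-EXAMPLE-BUCKET"), // placeholder
        // ACLs disabled; the bucket owner automatically owns every object.
        ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced),
    })
    _ = err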
const ( // ObjectOwnershipBucketOwnerPreferred is a ObjectOwnership enum value ObjectOwnershipBucketOwnerPreferred = "BucketOwnerPreferred" @@ -42139,6 +45438,9 @@ const ( // ObjectStorageClassSnow is a ObjectStorageClass enum value ObjectStorageClassSnow = "SNOW" + + // ObjectStorageClassExpressOnezone is a ObjectStorageClass enum value + ObjectStorageClassExpressOnezone = "EXPRESS_ONEZONE" ) // ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum @@ -42154,6 +45456,7 @@ func ObjectStorageClass_Values() []string { ObjectStorageClassOutposts, ObjectStorageClassGlacierIr, ObjectStorageClassSnow, + ObjectStorageClassExpressOnezone, } } @@ -42193,6 +45496,22 @@ func OwnerOverride_Values() []string { } } +const ( + // PartitionDateSourceEventTime is a PartitionDateSource enum value + PartitionDateSourceEventTime = "EventTime" + + // PartitionDateSourceDeliveryTime is a PartitionDateSource enum value + PartitionDateSourceDeliveryTime = "DeliveryTime" +) + +// PartitionDateSource_Values returns all elements of the PartitionDateSource enum +func PartitionDateSource_Values() []string { + return []string{ + PartitionDateSourceEventTime, + PartitionDateSourceDeliveryTime, + } +} + const ( // PayerRequester is a Payer enum value PayerRequester = "Requester" @@ -42313,6 +45632,9 @@ const ( // ReplicationStatusReplica is a ReplicationStatus enum value ReplicationStatusReplica = "REPLICA" + + // ReplicationStatusCompleted is a ReplicationStatus enum value + ReplicationStatusCompleted = "COMPLETED" ) // ReplicationStatus_Values returns all elements of the ReplicationStatus enum @@ -42322,6 +45644,7 @@ func ReplicationStatus_Values() []string { ReplicationStatusPending, ReplicationStatusFailed, ReplicationStatusReplica, + ReplicationStatusCompleted, } } @@ -42343,6 +45666,8 @@ func ReplicationTimeStatus_Values() []string { // If present, indicates that the requester was successfully charged for the // request. +// +// This functionality is not supported for directory buckets. const ( // RequestChargedRequester is a RequestCharged enum value RequestChargedRequester = "requester" @@ -42356,10 +45681,14 @@ func RequestCharged_Values() []string { } // Confirms that the requester knows that they will be charged for the request. -// Bucket owners need not specify this parameter in their requests. For information -// about downloading objects from Requester Pays buckets, see Downloading Objects +// Bucket owners need not specify this parameter in their requests. If either +// the source or destination S3 bucket has Requester Pays enabled, the requester +// will pay for corresponding charges to copy the object. For information about +// downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. 
const ( // RequestPayerRequester is a RequestPayer enum value RequestPayerRequester = "requester" @@ -42404,6 +45733,22 @@ func ServerSideEncryption_Values() []string { } } +const ( + // SessionModeReadOnly is a SessionMode enum value + SessionModeReadOnly = "ReadOnly" + + // SessionModeReadWrite is a SessionMode enum value + SessionModeReadWrite = "ReadWrite" +) + +// SessionMode_Values returns all elements of the SessionMode enum +func SessionMode_Values() []string { + return []string{ + SessionModeReadOnly, + SessionModeReadWrite, + } +} + const ( // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value SseKmsEncryptedObjectsStatusEnabled = "Enabled" @@ -42450,6 +45795,9 @@ const ( // StorageClassSnow is a StorageClass enum value StorageClassSnow = "SNOW" + + // StorageClassExpressOnezone is a StorageClass enum value + StorageClassExpressOnezone = "EXPRESS_ONEZONE" ) // StorageClass_Values returns all elements of the StorageClass enum @@ -42465,6 +45813,7 @@ func StorageClass_Values() []string { StorageClassOutposts, StorageClassGlacierIr, StorageClassSnow, + StorageClassExpressOnezone, } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go index cd6a2e8ae4..8a67333ab2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -25,6 +25,15 @@ const ( // "InvalidObjectState". // // Object is archived and inaccessible until restored. + // + // If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval + // storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering + // Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, + // before you can retrieve the object you must first restore a copy using RestoreObject + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). + // Otherwise, this operation returns an InvalidObjectState error. For information + // about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) + // in the Amazon S3 User Guide. 
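A sketch of reacting to this error code at a call site and restoring the object (illustrative, not part of the vendored SDK code); it assumes the awserr package and the RestoreObject API, neither of which appears in this excerpt, plus an existing svc client.

    _, err := svc.GetObject(&s3.GetObjectInput{
        Bucket: aws.String("DOC-EXAMPLE-BUCKET"), // placeholder
        Key:    aws.String("archived-object"),
    })
    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeInvalidObjectState {
        // Archived object: request a temporary restored copy, then retry the GET later.
        _, err = svc.RestoreObject(&s3.RestoreObjectInput{
            Bucket:         aws.String("DOC-EXAMPLE-BUCKET"),
            Key:            aws.String("archived-object"),
            RestoreRequest: &s3.RestoreRequest{Days: aws.Int64(7)},
        })
    }
    _ = err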
ErrCodeInvalidObjectState = "InvalidObjectState" // ErrCodeNoSuchBucket for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go index 6d679a2990..d13b461703 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -80,6 +80,10 @@ type S3API interface { CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error) CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) + CreateSession(*s3.CreateSessionInput) (*s3.CreateSessionOutput, error) + CreateSessionWithContext(aws.Context, *s3.CreateSessionInput, ...request.Option) (*s3.CreateSessionOutput, error) + CreateSessionRequest(*s3.CreateSessionInput) (*request.Request, *s3.CreateSessionOutput) + DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error) DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) @@ -300,6 +304,13 @@ type S3API interface { ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error) ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) + ListDirectoryBuckets(*s3.ListDirectoryBucketsInput) (*s3.ListDirectoryBucketsOutput, error) + ListDirectoryBucketsWithContext(aws.Context, *s3.ListDirectoryBucketsInput, ...request.Option) (*s3.ListDirectoryBucketsOutput, error) + ListDirectoryBucketsRequest(*s3.ListDirectoryBucketsInput) (*request.Request, *s3.ListDirectoryBucketsOutput) + + ListDirectoryBucketsPages(*s3.ListDirectoryBucketsInput, func(*s3.ListDirectoryBucketsOutput, bool) bool) error + ListDirectoryBucketsPagesWithContext(aws.Context, *s3.ListDirectoryBucketsInput, func(*s3.ListDirectoryBucketsOutput, bool) bool, ...request.Option) error + ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error) ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go index 0086334985..8f9e068f75 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go @@ -23,9 +23,32 @@ type UploadInput struct { _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` // The canned ACL to apply to the object. For more information, see Canned ACL - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. + // + // When adding a new object, you can use headers to grant ACL-based permissions + // to individual Amazon Web Services accounts or to predefined groups defined + // by Amazon S3. 
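A sketch of the CreateSession and ListDirectoryBuckets interface additions above (illustrative, not part of the vendored SDK code); only the method signatures plus the SessionCredentials and SessionMode types appear in this diff, so the CreateSessionInput/Output field names (Bucket, SessionMode, Credentials), the Buckets field on ListDirectoryBucketsOutput, and the fmt import are assumptions.

    // Establish scoped credentials for Zonal endpoint calls on a directory bucket.
    out, err := svc.CreateSession(&s3.CreateSessionInput{
        Bucket:      aws.String("DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"), // placeholder
        SessionMode: aws.String(s3.SessionModeReadWrite),
    })
    if err == nil && out.Credentials != nil {
        fmt.Println("session credentials expire at:", out.Credentials.Expiration)
    }

    // Page through the account's directory buckets.
    _ = svc.ListDirectoryBucketsPages(&s3.ListDirectoryBucketsInput{},
        func(page *s3.ListDirectoryBucketsOutput, lastPage bool) bool {
            fmt.Println(len(page.Buckets), "directory buckets on this page")
            return !lastPage
        })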
These permissions are then added to the ACL on the object. + // By default, all objects are private. Only the owner has full access control. + // For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) + // in the Amazon S3 User Guide. + // + // If the bucket that you're uploading objects to uses the bucket owner enforced + // setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. + // Buckets that use this setting only accept PUT requests that don't specify + // an ACL or PUT requests that specify bucket owner full control ACLs, such + // as the bucket-owner-full-control canned ACL or an equivalent form of this + // ACL expressed in the XML format. PUT requests that contain other ACLs (for + // example, custom grants to certain Amazon Web Services accounts) fail and + // return a 400 error with the error code AccessControlListNotSupported. For + // more information, see Controlling ownership of objects and disabling ACLs + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // * This functionality is not supported for directory buckets. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for Amazon S3 on Outposts. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // The readable body payload to send to S3. @@ -33,19 +56,33 @@ type UploadInput struct { // The bucket name to which the PUT action was initiated. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon
+ // S3 fails the request with the HTTP status code 400 Bad Request.
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the supported
+ // algorithm from the following list:
+ //
+ // * CRC32
+ //
+ // * CRC32C
+ //
+ // * SHA1
+ //
+ // * SHA256
+ //
+ // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
- // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
- // parameter.
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm,
+ // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum
+ // algorithm that matches the provided value in x-amz-checksum-algorithm.
+ // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. // // The AWS SDK for Go v1 does not support automatic computing request payload // checksum. This feature is available in the AWS SDK for Go v2. If a value @@ -130,6 +186,13 @@ type UploadInput struct { // integrity check. For more information about REST request authentication, // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). // + // The Content-MD5 header is required for any request to upload an object with + // a retention period configured using Amazon S3 Object Lock. For more information + // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // // If the ContentMD5 is provided for a multipart upload, it will be ignored. // Objects that will be uploaded in a single part, the ContentMD5 will be used. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` @@ -138,9 +201,9 @@ type UploadInput struct { // see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The date and time at which the object is no longer cacheable. For more information, @@ -149,22 +212,30 @@ type UploadInput struct { // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to read the object data and its metadata. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the object ACL. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to write the ACL for the applicable object. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. 
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Object key for which the PUT action was initiated. @@ -176,25 +247,37 @@ type UploadInput struct { Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // Specifies whether a legal hold will be applied to this object. For more information - // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` // The Object Lock mode that you want to apply to this object. + // + // This functionality is not supported for directory buckets. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // The date and time when you want this object's Object Lock to expire. Must // be formatted as a timestamp parameter. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -202,50 +285,80 @@ type UploadInput struct { // S3 does not store the encryption key. The key must be appropriate for use // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. 
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

// Specifies the Amazon Web Services KMS Encryption Context to use for object
// encryption. The value of this header is a base64-encoded UTF-8 string holding
// JSON with the encryption context key-value pairs. This value is stored as
// object metadata and automatically gets passed on to Amazon Web Services KMS
- // for future GetObject or CopyObject operations on this object.
+ // for future GetObject or CopyObject operations on this object. This value
+ // must be explicitly added during CopyObject operations.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`

// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse,
- // this header specifies the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. If you specify
- // x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse,
+ // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management
+ // Service (KMS) symmetric encryption customer managed key that was used for
+ // the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse,
// but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3
// uses the Amazon Web Services managed key (aws/s3) to protect the data. If
// the KMS key does not exist in the same account that's issuing the command,
// you must use the full ARN and not just the ID.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`

- // The server-side encryption algorithm used when storing this object in Amazon
- // S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ // The server-side encryption algorithm used when you store this object in
+ // Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ //
+ // General purpose buckets - You have four mutually exclusive options to protect
+ // data using server-side encryption in Amazon S3, depending on how you choose
+ // to manage the encryption keys. Specifically, the encryption key options are
+ // Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or
+ // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with
+ // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default.
+ // You can optionally tell Amazon S3 to encrypt data at rest by using server-side
+ // encryption with other key options. For more information, see Using Server-Side
+ // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Directory buckets - For directory buckets, only the server-side encryption
+ // with Amazon S3 managed keys (SSE-S3) (AES256) value is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`

// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
// objects. The STANDARD storage class provides high durability and high availability.
// Depending on performance needs, you can specify a different Storage Class.
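The encryption options described above map directly onto the input struct. A
sketch of requesting SSE-KMS under a customer managed key, reusing svc and
payload from the earlier sketch (the bucket, key, and KMS key ARN are
hypothetical placeholders):

	// Ask S3 to encrypt the object with SSE-KMS under a specific customer
	// managed key; omitting SSEKMSKeyId would fall back to the aws/s3 key.
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:               aws.String("example-bucket"), // hypothetical
		Key:                  aws.String("example-key"),    // hypothetical
		Body:                 bytes.NewReader(payload),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms), // "aws:kms"
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"), // hypothetical
		BucketKeyEnabled:     aws.Bool(true), // use S3 Bucket Keys to reduce KMS request costs
	})
	if err != nil {
		log.Fatal(err)
	}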
- // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, - // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 User Guide. + // + // * For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // + // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. // (For example, "Key1=Value1") + // + // This functionality is not supported for directory buckets. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. For information about object - // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) + // in the Amazon S3 User Guide. // // In the following example, the request header sets the redirect to an object // (anotherPage.html) in the same bucket: @@ -259,6 +372,9 @@ type UploadInput struct { // // For more information about website hosting in Amazon S3, see Hosting Websites // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) - // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go index c743913c57..04f6c811b6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -56,9 +56,10 @@ func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Requ // CreateToken API operation for AWS SSO OIDC. // -// Creates and returns an access token for the authorized client. The access -// token issued will be used to fetch short-term credentials for the assigned -// roles in the AWS account. +// Creates and returns access and refresh tokens for clients that are authenticated +// using client secrets. The access token can be used to fetch short-term credentials +// for the assigned AWS accounts or to access application APIs using bearer +// authentication. // // Returns awserr.Error for service API and SDK errors. 
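As a sketch of that error contract, a caller polling CreateToken during the
device authorization flow might branch on the error code like this (svc is an
*ssooidc.SSOOIDC client; assumes the usual aws-sdk-go awserr and ssooidc
imports):

	out, err := svc.CreateToken(input)
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case ssooidc.ErrCodeAuthorizationPendingException:
				// The user has not finished authorizing; poll again after
				// the interval from StartDeviceAuthorization.
			case ssooidc.ErrCodeSlowDownException:
				// Polling too fast; increase the interval before retrying.
			default:
				log.Printf("CreateToken failed: %s: %s", aerr.Code(), aerr.Message())
			}
		}
	} else {
		log.Printf("received token type %s", aws.StringValue(out.TokenType))
	}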
Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -133,6 +134,131 @@ func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInpu
	return out, req.Send()
}

+const opCreateTokenWithIAM = "CreateTokenWithIAM"
+
+// CreateTokenWithIAMRequest generates a "aws/request.Request" representing the
+// client's request for the CreateTokenWithIAM operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateTokenWithIAM for more information on using the CreateTokenWithIAM
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//	// Example sending a request using the CreateTokenWithIAMRequest method.
+//	req, resp := client.CreateTokenWithIAMRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM
+func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req *request.Request, output *CreateTokenWithIAMOutput) {
+	op := &request.Operation{
+		Name:       opCreateTokenWithIAM,
+		HTTPMethod: "POST",
+		HTTPPath:   "/token?aws_iam=t",
+	}
+
+	if input == nil {
+		input = &CreateTokenWithIAMInput{}
+	}
+
+	output = &CreateTokenWithIAMOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateTokenWithIAM API operation for AWS SSO OIDC.
+//
+// Creates and returns access and refresh tokens for clients and applications
+// that are authenticated using IAM entities. The access token can be used to
+// fetch short-term credentials for the assigned AWS accounts or to access application
+// APIs using bearer authentication.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS SSO OIDC's
+// API operation CreateTokenWithIAM for usage and error information.
+//
+// Returned Error Types:
+//
+//   - InvalidRequestException
+//     Indicates that something is wrong with the input to the request. For example,
+//     a required parameter might be missing or out of range.
+//
+//   - InvalidClientException
+//     Indicates that the clientId or clientSecret in the request is invalid. For
+//     example, this can occur when a client sends an incorrect clientId or an expired
+//     clientSecret.
+//
+//   - InvalidGrantException
+//     Indicates that a request contains an invalid grant. This can occur if a client
+//     makes a CreateToken request with an invalid grant type.
+//
+//   - UnauthorizedClientException
+//     Indicates that the client is not currently authorized to make the request.
+//     This can happen when a clientId is not issued for a public client.
+//
+//   - UnsupportedGrantTypeException
+//     Indicates that the grant type in the request is not supported by the service.
+//
+//   - InvalidScopeException
+//     Indicates that the scope provided in the request is invalid.
+//
+//   - AuthorizationPendingException
+//     Indicates that a request to authorize a client with an access user session
+//     token is pending.
+//
+//   - SlowDownException
+//     Indicates that the client is making requests more frequently than the
+//     service can handle.
+//
+//   - AccessDeniedException
+//     You do not have sufficient access to perform this action.
+//
+//   - ExpiredTokenException
+//     Indicates that the token issued by the service is expired and is no longer
+//     valid.
+//
+//   - InternalServerException
+//     Indicates that an error from the service occurred while trying to process
+//     a request.
+//
+//   - InvalidRequestRegionException
+//     Indicates that a token provided as input to the request was issued by and
+//     is only usable by calling IAM Identity Center endpoints in another region.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM
+func (c *SSOOIDC) CreateTokenWithIAM(input *CreateTokenWithIAMInput) (*CreateTokenWithIAMOutput, error) {
+	req, out := c.CreateTokenWithIAMRequest(input)
+	return out, req.Send()
+}
+
+// CreateTokenWithIAMWithContext is the same as CreateTokenWithIAM with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateTokenWithIAM for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSOOIDC) CreateTokenWithIAMWithContext(ctx aws.Context, input *CreateTokenWithIAMInput, opts ...request.Option) (*CreateTokenWithIAMOutput, error) {
+	req, out := c.CreateTokenWithIAMRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
const opRegisterClient = "RegisterClient"

// RegisterClientRequest generates a "aws/request.Request" representing the
@@ -331,8 +457,11 @@ type AccessDeniedException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

+	// Single error code. For this exception the value will be access_denied.
	Error_ *string `locationName:"error" type:"string"`

+	// Human-readable text providing additional information, used to assist the
+	// client developer in understanding the error that occurred.
	Error_description *string `locationName:"error_description" type:"string"`

	Message_ *string `locationName:"message" type:"string"`
@@ -400,8 +529,11 @@ type AuthorizationPendingException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

+	// Single error code. For this exception the value will be authorization_pending.
	Error_ *string `locationName:"error" type:"string"`

+	// Human-readable text providing additional information, used to assist the
+	// client developer in understanding the error that occurred.
	Error_description *string `locationName:"error_description" type:"string"`

	Message_ *string `locationName:"message" type:"string"`
@@ -466,8 +598,8 @@ func (s *AuthorizationPendingException) RequestID() string {
type CreateTokenInput struct {
	_ struct{} `type:"structure"`

-	// The unique identifier string for each client. This value should come from
-	// the persisted result of the RegisterClient API.
+	// The unique identifier string for the client or application. This value comes
+	// from the result of the RegisterClient API.
// // ClientId is a required field ClientId *string `locationName:"clientId" type:"string" required:"true"` @@ -475,23 +607,30 @@ type CreateTokenInput struct { // A secret string generated for the client. This value should come from the // persisted result of the RegisterClient API. // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + // // ClientSecret is a required field - ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"` + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"` - // The authorization code received from the authorization service. This parameter - // is required to perform an authorization grant request to get access to a - // token. + // Used only when calling this API for the Authorization Code grant type. The + // short-term code is used to identify this authorization request. This grant + // type is currently unsupported for the CreateToken API. Code *string `locationName:"code" type:"string"` - // Used only when calling this API for the device code grant type. This short-term - // code is used to identify this authentication attempt. This should come from - // an in-memory reference to the result of the StartDeviceAuthorization API. + // Used only when calling this API for the Device Code grant type. This short-term + // code is used to identify this authorization request. This comes from the + // result of the StartDeviceAuthorization API. DeviceCode *string `locationName:"deviceCode" type:"string"` - // Supports grant types for the authorization code, refresh token, and device - // code request. For device code requests, specify the following value: + // Supports the following OAuth grant types: Device Code and Refresh Token. + // Specify either of the following values, depending on the grant type that + // you want: + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code // - // urn:ietf:params:oauth:grant-type:device_code + // * Refresh Token - refresh_token // // For information about how to obtain the device code, see the StartDeviceAuthorization // topic. @@ -499,21 +638,28 @@ type CreateTokenInput struct { // GrantType is a required field GrantType *string `locationName:"grantType" type:"string" required:"true"` - // The location of the application that will receive the authorization code. - // Users authorize the service to send the request to this location. + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered + // to receive the authorization code. RedirectUri *string `locationName:"redirectUri" type:"string"` - // Currently, refreshToken is not yet implemented and is not supported. For - // more information about the features and limitations of the current IAM Identity - // Center OIDC implementation, see Considerations for Using this Guide in the - // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // Used only when calling this API for the Refresh Token grant type. This token + // is used to refresh short-term tokens, such as the access token, that might + // expire. // - // The token used to obtain an access token in the event that the access token - // is invalid or expired. 
- RefreshToken *string `locationName:"refreshToken" type:"string"` - - // The list of scopes that is defined by the client. Upon authorization, this - // list is used to restrict permissions when granting an access token. + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // The list of scopes for which authorization is requested. The access token + // that is issued is limited to the scopes that are granted. If this value is + // not specified, IAM Identity Center authorizes all scopes that are configured + // for the client during the call to RegisterClient. Scope []*string `locationName:"scope" type:"list"` } @@ -605,31 +751,43 @@ func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { type CreateTokenOutput struct { _ struct{} `type:"structure"` - // An opaque token to access IAM Identity Center resources assigned to a user. - AccessToken *string `locationName:"accessToken" type:"string"` + // A bearer token to access AWS accounts and applications assigned to a user. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"` // Indicates the time in seconds when an access token will expire. ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` - // Currently, idToken is not yet implemented and is not supported. For more - // information about the features and limitations of the current IAM Identity - // Center OIDC implementation, see Considerations for Using this Guide in the - // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // The idToken is not implemented or supported. For more information about the + // features and limitations of the current IAM Identity Center OIDC implementation, + // see Considerations for Using this Guide in the IAM Identity Center OIDC API + // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). // - // The identifier of the user that associated with the access token, if present. - IdToken *string `locationName:"idToken" type:"string"` - - // Currently, refreshToken is not yet implemented and is not supported. For - // more information about the features and limitations of the current IAM Identity - // Center OIDC implementation, see Considerations for Using this Guide in the - // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // A JSON Web Token (JWT) that identifies who is associated with the issued + // access token. // + // IdToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. 
+ IdToken *string `locationName:"idToken" type:"string" sensitive:"true"` + // A token that, if present, can be used to refresh a previously issued access // token that might have expired. - RefreshToken *string `locationName:"refreshToken" type:"string"` + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` // Used to notify the client that the returned token is an access token. The - // supported type is BearerToken. + // supported token type is Bearer. TokenType *string `locationName:"tokenType" type:"string"` } @@ -681,14 +839,312 @@ func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput { return s } +type CreateTokenWithIAMInput struct { + _ struct{} `type:"structure"` + + // Used only when calling this API for the JWT Bearer grant type. This value + // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To authorize + // a trusted token issuer, configure the JWT Bearer GrantOptions for the application. + // + // Assertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + Assertion *string `locationName:"assertion" type:"string" sensitive:"true"` + + // The unique identifier string for the client or application. This value is + // an application ARN that has OAuth grants configured. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // short-term code is used to identify this authorization request. The code + // is obtained through a redirect from IAM Identity Center to a redirect URI + // persisted in the Authorization Code GrantOptions for the application. + Code *string `locationName:"code" type:"string"` + + // Supports the following OAuth grant types: Authorization Code, Refresh Token, + // JWT Bearer, and Token Exchange. Specify one of the following values, depending + // on the grant type that you want: + // + // * Authorization Code - authorization_code + // + // * Refresh Token - refresh_token + // + // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer + // + // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered + // to receive the authorization code. + RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Used only when calling this API for the Refresh Token grant type. This token + // is used to refresh short-term tokens, such as the access token, that might + // expire. 
+ // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that the requester can receive. The following + // values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + RequestedTokenType *string `locationName:"requestedTokenType" type:"string"` + + // The list of scopes for which authorization is requested. The access token + // that is issued is limited to the scopes that are granted. If the value is + // not specified, IAM Identity Center authorizes all scopes configured for the + // application, including the following default scopes: openid, aws, sts:identity_context. + Scope []*string `locationName:"scope" type:"list"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the subject of the exchange. The value of the subject token must + // be an access token issued by IAM Identity Center to a different client or + // application. The access token must have authorized scopes that indicate the + // requested application as a target audience. + // + // SubjectToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + SubjectToken *string `locationName:"subjectToken" type:"string" sensitive:"true"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that is passed as the subject of the exchange. + // The following value is supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + SubjectTokenType *string `locationName:"subjectTokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateTokenWithIAMInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenWithIAMInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssertion sets the Assertion field's value. +func (s *CreateTokenWithIAMInput) SetAssertion(v string) *CreateTokenWithIAMInput { + s.Assertion = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenWithIAMInput) SetClientId(v string) *CreateTokenWithIAMInput { + s.ClientId = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput { + s.Code = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenWithIAMInput) SetRedirectUri(v string) *CreateTokenWithIAMInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenWithIAMInput) SetRefreshToken(v string) *CreateTokenWithIAMInput { + s.RefreshToken = &v + return s +} + +// SetRequestedTokenType sets the RequestedTokenType field's value. +func (s *CreateTokenWithIAMInput) SetRequestedTokenType(v string) *CreateTokenWithIAMInput { + s.RequestedTokenType = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenWithIAMInput) SetScope(v []*string) *CreateTokenWithIAMInput { + s.Scope = v + return s +} + +// SetSubjectToken sets the SubjectToken field's value. +func (s *CreateTokenWithIAMInput) SetSubjectToken(v string) *CreateTokenWithIAMInput { + s.SubjectToken = &v + return s +} + +// SetSubjectTokenType sets the SubjectTokenType field's value. +func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWithIAMInput { + s.SubjectTokenType = &v + return s +} + +type CreateTokenWithIAMOutput struct { + _ struct{} `type:"structure"` + + // A bearer token to access AWS accounts and applications assigned to a user. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // A JSON Web Token (JWT) that identifies the user associated with the issued + // access token. + // + // IdToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + IdToken *string `locationName:"idToken" type:"string" sensitive:"true"` + + // Indicates the type of tokens that are issued by IAM Identity Center. The + // following values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + IssuedTokenType *string `locationName:"issuedTokenType" type:"string"` + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. 
+ // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // The list of scopes for which authorization is granted. The access token that + // is issued is limited to the scopes that are granted. + Scope []*string `locationName:"scope" type:"list"` + + // Used to notify the requester that the returned token is an access token. + // The supported token type is Bearer. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenWithIAMOutput) SetAccessToken(v string) *CreateTokenWithIAMOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *CreateTokenWithIAMOutput) SetExpiresIn(v int64) *CreateTokenWithIAMOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenWithIAMOutput) SetIdToken(v string) *CreateTokenWithIAMOutput { + s.IdToken = &v + return s +} + +// SetIssuedTokenType sets the IssuedTokenType field's value. +func (s *CreateTokenWithIAMOutput) SetIssuedTokenType(v string) *CreateTokenWithIAMOutput { + s.IssuedTokenType = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenWithIAMOutput) SetRefreshToken(v string) *CreateTokenWithIAMOutput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenWithIAMOutput) SetScope(v []*string) *CreateTokenWithIAMOutput { + s.Scope = v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenWithIAMOutput) SetTokenType(v string) *CreateTokenWithIAMOutput { + s.TokenType = &v + return s +} + // Indicates that the token issued by the service is expired and is no longer // valid. type ExpiredTokenException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be expired_token. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. 
Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -756,8 +1212,11 @@ type InternalServerException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be server_error. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -826,8 +1285,11 @@ type InvalidClientException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be invalid_client. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -895,8 +1357,11 @@ type InvalidClientMetadataException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be invalid_client_metadata. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -964,8 +1429,11 @@ type InvalidGrantException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be invalid_grant. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -1033,8 +1501,11 @@ type InvalidRequestException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be invalid_request. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -1096,13 +1567,95 @@ func (s *InvalidRequestException) RequestID() string { return s.RespMetadata.RequestID } +// Indicates that a token provided as input to the request was issued by and +// is only usable by calling IAM Identity Center endpoints in another region. +type InvalidRequestRegionException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Indicates the IAM Identity Center endpoint which the requester may call with + // this token. + Endpoint *string `locationName:"endpoint" type:"string"` + + // Single error code. For this exception the value will be invalid_request. 
+ Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` + + // Indicates the region which the requester may call with this token. + Region *string `locationName:"region" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestRegionException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestRegionException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestRegionException(v protocol.ResponseMetadata) error { + return &InvalidRequestRegionException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestRegionException) Code() string { + return "InvalidRequestRegionException" +} + +// Message returns the exception's message. +func (s *InvalidRequestRegionException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestRegionException) OrigErr() error { + return nil +} + +func (s *InvalidRequestRegionException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestRegionException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestRegionException) RequestID() string { + return s.RespMetadata.RequestID +} + // Indicates that the scope provided in the request is invalid. type InvalidScopeException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be invalid_scope. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -1238,7 +1791,7 @@ func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { type RegisterClientOutput struct { _ struct{} `type:"structure"` - // The endpoint where the client can request authorization. + // An endpoint that the client can use to request authorization. AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"` // The unique identifier string for each client. This client uses this identifier @@ -1250,12 +1803,16 @@ type RegisterClientOutput struct { // A secret string generated for the client. The client will use this string // to get authenticated by the service in subsequent calls. 
- ClientSecret *string `locationName:"clientSecret" type:"string"` + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RegisterClientOutput's + // String and GoString methods. + ClientSecret *string `locationName:"clientSecret" type:"string" sensitive:"true"` // Indicates the time at which the clientId and clientSecret will become invalid. ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"` - // The endpoint where the client can get an access token. + // An endpoint that the client can use to create tokens. TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"` } @@ -1319,8 +1876,11 @@ type SlowDownException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be slow_down. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -1395,11 +1955,15 @@ type StartDeviceAuthorizationInput struct { // A secret string that is generated for the client. This value should come // from the persisted result of the RegisterClient API operation. // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by StartDeviceAuthorizationInput's + // String and GoString methods. + // // ClientSecret is a required field - ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"` + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"` - // The URL for the AWS access portal. For more information, see Using the AWS - // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // The URL for the Amazon Web Services access portal. For more information, + // see Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) // in the IAM Identity Center User Guide. // // StartUrl is a required field @@ -1550,8 +2114,11 @@ type UnauthorizedClientException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be unauthorized_client. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` @@ -1618,8 +2185,11 @@ type UnsupportedGrantTypeException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Single error code. For this exception the value will be unsupported_grant_type. Error_ *string `locationName:"error" type:"string"` + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. 
Error_description *string `locationName:"error_description" type:"string"` Message_ *string `locationName:"message" type:"string"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go index 8b5ee6019a..083568c616 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go @@ -3,15 +3,13 @@ // Package ssooidc provides the client and types for making API // requests to AWS SSO OIDC. // -// AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect -// (OIDC) is a web service that enables a client (such as AWS CLI or a native -// application) to register with IAM Identity Center. The service also enables -// the client to fetch the user’s access token upon successful authentication -// and authorization with IAM Identity Center. +// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a +// client (such as CLI or a native application) to register with IAM Identity +// Center. The service also enables the client to fetch the user’s access +// token upon successful authentication and authorization with IAM Identity +// Center. // -// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces -// will continue to retain their original name for backward compatibility purposes. -// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed). +// IAM Identity Center uses the sso and identitystore API namespaces. // // # Considerations for Using This Guide // @@ -22,21 +20,24 @@ // - The IAM Identity Center OIDC service currently implements only the portions // of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628 // (https://tools.ietf.org/html/rfc8628)) that are necessary to enable single -// sign-on authentication with the AWS CLI. Support for other OIDC flows -// frequently needed for native applications, such as Authorization Code -// Flow (+ PKCE), will be addressed in future releases. +// sign-on authentication with the CLI. // -// - The service emits only OIDC access tokens, such that obtaining a new -// token (For example, token refresh) requires explicit user re-authentication. +// - With older versions of the CLI, the service only emits OIDC access tokens, +// so to obtain a new token, users must explicitly re-authenticate. To access +// the OIDC flow that supports token refresh and doesn’t require re-authentication, +// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI +// V2) with support for OIDC token refresh and configurable IAM Identity +// Center session durations. For more information, see Configure Amazon Web +// Services access portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html). // -// - The access tokens provided by this service grant access to all AWS account -// entitlements assigned to an IAM Identity Center user, not just a particular -// application. +// - The access tokens provided by this service grant access to all Amazon +// Web Services account entitlements assigned to an IAM Identity Center user, +// not just a particular application. 
// // - The documentation in this guide does not describe the mechanism to convert -// the access token into AWS Auth (“sigv4”) credentials for use with -// IAM-protected AWS service endpoints. For more information, see GetRoleCredentials -// (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) +// the access token into Amazon Web Services Auth (“sigv4”) credentials +// for use with IAM-protected Amazon Web Services service endpoints. For +// more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) // in the IAM Identity Center Portal API Reference Guide. // // For general information about IAM Identity Center, see What is IAM Identity diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go index 6983770126..e6242e4928 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -64,6 +64,13 @@ const ( // a required parameter might be missing or out of range. ErrCodeInvalidRequestException = "InvalidRequestException" + // ErrCodeInvalidRequestRegionException for service response error code + // "InvalidRequestRegionException". + // + // Indicates that a token provided as input to the request was issued by and + // is only usable by calling IAM Identity Center endpoints in another region. + ErrCodeInvalidRequestRegionException = "InvalidRequestRegionException" + // ErrCodeInvalidScopeException for service response error code // "InvalidScopeException". // @@ -100,6 +107,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidClientMetadataException": newErrorInvalidClientMetadataException, "InvalidGrantException": newErrorInvalidGrantException, "InvalidRequestException": newErrorInvalidRequestException, + "InvalidRequestRegionException": newErrorInvalidRequestRegionException, "InvalidScopeException": newErrorInvalidScopeException, "SlowDownException": newErrorSlowDownException, "UnauthorizedClientException": newErrorUnauthorizedClientException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go index 969f33c37b..782bae3692 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go @@ -51,7 +51,7 @@ const ( func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC { c := p.ClientConfig(EndpointsID, cfgs...) if c.SigningNameDerived || len(c.SigningName) == 0 { - c.SigningName = "awsssooidc" + c.SigningName = "sso-oauth" } return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 11af63b4d8..2c395f5f67 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -1460,7 +1460,15 @@ type AssumeRoleInput struct { // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` - // Reserved for future use. 
+ // A list of previously acquired trusted context assertions in the format of + // a JSON array. The trusted context assertion is signed and encrypted by Amazon + // Web Services STS. + // + // The following is an example of a ProvidedContext value that includes a single + // trusted context assertion and the ARN of the context provider from which + // the trusted context assertion was generated. + // + // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] ProvidedContexts []*ProvidedContext `type:"list"` // The Amazon Resource Name (ARN) of the role to assume. @@ -3405,14 +3413,18 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { return s } -// Reserved for future use. +// Contains information about the provided context. This includes the signed +// and encrypted trusted context assertion and the context provider ARN from +// which the trusted context assertion was generated. type ProvidedContext struct { _ struct{} `type:"structure"` - // Reserved for future use. + // The signed and encrypted trusted context assertion generated by the context + // provider. The trusted context assertion is signed and encrypted by Amazon + // Web Services STS. ContextAssertion *string `min:"4" type:"string"` - // Reserved for future use. + // The context provider ARN from which the trusted context assertion was generated. ProviderArn *string `min:"20" type:"string"` } diff --git a/vendor/github.com/dlclark/regexp2/.gitignore b/vendor/github.com/dlclark/regexp2/.gitignore new file mode 100644 index 0000000000..fb844c330c --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/.gitignore @@ -0,0 +1,27 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.out + +.DS_Store diff --git a/vendor/github.com/dlclark/regexp2/.travis.yml b/vendor/github.com/dlclark/regexp2/.travis.yml new file mode 100644 index 0000000000..a2da6be473 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/.travis.yml @@ -0,0 +1,7 @@ +language: go +arch: + - AMD64 + - ppc64le +go: + - 1.9 + - tip diff --git a/vendor/github.com/dlclark/regexp2/ATTRIB b/vendor/github.com/dlclark/regexp2/ATTRIB new file mode 100644 index 0000000000..cdf4560b9e --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/ATTRIB @@ -0,0 +1,133 @@ +============ +These pieces of code were ported from dotnet/corefx: + +syntax/charclass.go (from RegexCharClass.cs): ported to use the built-in Go unicode classes. Canonicalize is + a direct port, but most of the other code required large changes because the C# implementation + used a string to represent the CharSet data structure and I cleaned that up in my implementation. + +syntax/code.go (from RegexCode.cs): ported literally with various cleanups and layout to make it more Go-ish. + +syntax/escape.go (from RegexParser.cs): ported Escape method and added some optimizations. Unescape is inspired by + the C# implementation but couldn't be directly ported because of the lack of do-while syntax in Go. + +syntax/parser.go (from RegexpParser.cs and RegexOptions.cs): ported parser struct and associated methods as + literally as possible. 
Several language differences required changes. E.g. lack of pre/post-fix increments as
+  expressions, lack of do-while loops, lack of overloads, etc.
+
+syntax/prefix.go (from RegexFCD.cs and RegexBoyerMoore.cs): ported as literally as possible and added support
+  for unicode chars that are longer than the 16-bit char in C# for the 32-bit rune in Go.
+
+syntax/replacerdata.go (from RegexReplacement.cs): conceptually ported and re-organized to handle differences
+  in charclass implementation, and fix odd code layout between RegexParser.cs, Regex.cs, and RegexReplacement.cs.
+
+syntax/tree.go (from RegexTree.cs and RegexNode.cs): ported as literally as possible.
+
+syntax/writer.go (from RegexWriter.cs): ported literally with minor changes to make it more Go-ish.
+
+match.go (from RegexMatch.cs): ported, simplified, and changed to handle Go's lack of inheritance.
+
+regexp.go (from Regex.cs and RegexOptions.cs): conceptually serves the same "starting point", but is simplified
+  and changed to handle differences in C# strings and Go strings/runes.
+
+replace.go (from RegexReplacement.cs): ported closely and then cleaned up to combine the MatchEvaluator and
+  simple string replace implementations.
+
+runner.go (from RegexRunner.cs): ported as literally as possible.
+
+regexp_test.go (from CaptureTests.cs and GroupNamesAndNumbers.cs): conceptually ported, but the code was
+  manually structured like Go tests.
+
+replace_test.go (from RegexReplaceStringTest0.cs): conceptually ported
+
+rtl_test.go (from RightToLeft.cs): conceptually ported
+---
+dotnet/corefx was released under this license:
+
+The MIT License (MIT)
+
+Copyright (c) Microsoft Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+============
+These pieces of code are copied from the Go framework:
+
+- The overall directory structure of regexp2 was inspired by the Go runtime regexp package.
+- The optimization in the escape method of syntax/escape.go is from the Go runtime QuoteMeta() func in regexp/regexp.go
+- The method signatures in regexp.go are designed to match the Go framework regexp methods closely
+- func regexp2.MustCompile and func quote are almost identical to the regexp package versions
+- BenchmarkMatch* and TestProgramTooLong* funcs in regexp_performance_test.go were copied from the framework
+  regexp/exec_test.go
+---
+The Go framework was released under this license:
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +============ +Some test data were gathered from the Mono project. + +regexp_mono_test.go: ported from https://github.com/mono/mono/blob/master/mcs/class/System/Test/System.Text.RegularExpressions/PerlTrials.cs +--- +Mono tests released under this license: + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+
diff --git a/vendor/github.com/dlclark/regexp2/LICENSE b/vendor/github.com/dlclark/regexp2/LICENSE
new file mode 100644
index 0000000000..fe83dfdc92
--- /dev/null
+++ b/vendor/github.com/dlclark/regexp2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) Doug Clark
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/dlclark/regexp2/README.md b/vendor/github.com/dlclark/regexp2/README.md
new file mode 100644
index 0000000000..9cbc1d8d0a
--- /dev/null
+++ b/vendor/github.com/dlclark/regexp2/README.md
@@ -0,0 +1,174 @@
+# regexp2 - full featured regular expressions for Go
+Regexp2 is a feature-rich RegExp engine for Go. It doesn't have constant time guarantees like the built-in `regexp` package, but it allows backtracking and is compatible with Perl5 and .NET. You'll likely be better off with the RE2 engine from the `regexp` package and should only use this if you need to write very complex patterns or require compatibility with .NET.
+
+## Basis of the engine
+The engine is ported from the .NET framework's System.Text.RegularExpressions.Regex engine. That engine was open sourced in 2015 under the MIT license. There are some fundamental differences between .NET strings and Go strings that required a bit of borrowing from the Go framework regex engine as well. I cleaned up a couple of the dirtier bits during the port (regexcharclass.cs was terrible), but the parse tree, code emitted, and therefore patterns matched should be identical.
+
+## New Code Generation
+For extra performance use `regexp2` with [`regexp2cg`](https://github.com/dlclark/regexp2cg). It is a code generation utility for `regexp2` and you can likely improve your regexp runtime performance by 3-10x in hot code paths. As always you should benchmark your specifics to confirm the results. Give it a try!
+
+## Installing
+This is a go-gettable library, so install is easy:
+
+    go get github.com/dlclark/regexp2
+
+To use the new Code Generation (while it's in beta) you'll need to use the `code_gen` branch:
+
+    go get github.com/dlclark/regexp2@code_gen
+
+## Usage
+Usage is similar to the Go `regexp` package. Just like in `regexp`, you start by converting a regex into a state machine via the `Compile` or `MustCompile` methods. They ultimately do the same thing, but `MustCompile` will panic if the regex is invalid.
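+For instance (a minimal sketch with an intentionally invalid pattern), `Compile` reports the parse error instead of panicking:
+
+```go
+re, err := regexp2.Compile(`a(b`, 0) // unclosed group, so Compile returns an error
+if err != nil {
+	// handle the bad pattern; re is nil here
+}
+_ = re
+```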
+You can then use the provided `Regexp` struct to find matches repeatedly. A `Regexp` struct is safe to use across goroutines.
+
+```go
+re := regexp2.MustCompile(`Your pattern`, 0)
+if isMatch, _ := re.MatchString(`Something to match`); isMatch {
+	//do something
+}
+```
+
+The only error that the `*Match*` methods *should* return is a Timeout if you set the `re.MatchTimeout` field. Any other error is a bug in the `regexp2` package. If you need more details about capture groups in a match then use the `FindStringMatch` method, like so:
+
+```go
+if m, _ := re.FindStringMatch(`Something to match`); m != nil {
+	// the whole match is always group 0
+	fmt.Printf("Group 0: %v\n", m.String())
+
+	// you can get all the groups too
+	gps := m.Groups()
+
+	// a group can be captured multiple times, so each cap is separately addressable
+	fmt.Printf("Group 1, first capture: %v\n", gps[1].Captures[0].String())
+	fmt.Printf("Group 1, second capture: %v\n", gps[1].Captures[1].String())
+}
+```
+
+Group 0 is embedded in the Match. Group 0 is an automatically-assigned group that encompasses the whole pattern. This means that `m.String()` is the same as `m.Group.String()` and `m.Groups()[0].String()`.
+
+The __last__ capture is embedded in each group, so `g.String()` will return the same thing as `g.Capture.String()` and `g.Captures[len(g.Captures)-1].String()`.
+
+If you want to find multiple matches from a single input string you should use the `FindNextMatch` method. For example, to implement a function similar to `regexp.FindAllString`:
+
+```go
+func regexp2FindAllString(re *regexp2.Regexp, s string) []string {
+	var matches []string
+	m, _ := re.FindStringMatch(s)
+	for m != nil {
+		matches = append(matches, m.String())
+		m, _ = re.FindNextMatch(m)
+	}
+	return matches
+}
+```
+
+`FindNextMatch` is optimized so that it re-uses the underlying string/rune slice.
+
+The internals of `regexp2` always operate on `[]rune` so `Index` and `Length` data in a `Match` always reference a position in `rune`s rather than `byte`s (even if the input was given as a string). This is a dramatic difference between `regexp` and `regexp2`. It's advisable to use the provided `String()` methods to avoid having to work with indices.
+
+## Compare `regexp` and `regexp2`
+| Category | regexp | regexp2 |
+| --- | --- | --- |
+| Catastrophic backtracking possible | no, constant execution time guarantees | yes, if your pattern is at risk you can use the `re.MatchTimeout` field |
+| Python-style capture groups `(?P<name>re)` | yes | no (yes in RE2 compat mode) |
+| .NET-style capture groups `(?<name>re)` or `(?'name're)` | no | yes |
+| comments `(?#comment)` | no | yes |
+| branch numbering reset `(?\|a\|b)` | no | no |
+| possessive match `(?>re)` | no | yes |
+| positive lookahead `(?=re)` | no | yes |
+| negative lookahead `(?!re)` | no | yes |
+| positive lookbehind `(?<=re)` | no | yes |
+| negative lookbehind `(?<!re)` | no | yes |
+| back reference `\1` | no | yes |
+| named back reference `\k'name'` | no | yes |
+| named ascii character class `[[:foo:]]` | yes | no (yes in RE2 compat mode) |
+| conditionals `(?(expr)yes\|no)` | no | yes |
+
+## RE2 compatibility mode
+The default behavior of `regexp2` is to match the .NET regexp engine; the `RE2` compile option changes the parsing to increase compatibility with RE2. Using the `RE2` option when compiling a regexp will not take away any features, but will change the following behaviors:
+
+* add support for named ascii character classes (e.g. `[[:foo:]]`)
+* add support for python-style capture groups (e.g. `(?P<name>re)`)
+* change singleline behavior for `$` to only match end of string (like RE2) (see [#24](https://github.com/dlclark/regexp2/issues/24))
+* change the character classes `\d` `\s` and `\w` to match the same characters as RE2. NOTE: if you also use the `ECMAScript` option then this will change the `\s` character class to match ECMAScript instead of RE2. ECMAScript allows more whitespace characters in `\s` than RE2 (but still fewer than the default behavior).
+* allow character escape sequences to have defaults.
+For example, by default `\_` isn't a known character escape and will fail to compile, but in RE2 mode it will match the literal character `_`.
+
+```go
+re := regexp2.MustCompile(`Your RE2-compatible pattern`, regexp2.RE2)
+if isMatch, _ := re.MatchString(`Something to match`); isMatch {
+	//do something
+}
+```
+
+This feature is a work in progress and I'm open to ideas for more things to put here (maybe more relaxed character escaping rules?).
+
+## Catastrophic Backtracking and Timeouts
+
+`regexp2` supports features that can lead to catastrophic backtracking.
+`Regexp.MatchTimeout` can be set to limit the impact of such behavior; the
+match will fail with an error after approximately MatchTimeout. No timeout
+checks are done by default.
+
+Timeout checking is not free. The current timeout checking implementation starts
+a background worker that updates a clock value approximately once every 100
+milliseconds. The matching code compares this value against the precomputed
+deadline for the match. The performance impact is as follows.
+
+1. A match with a timeout runs almost as fast as a match without a timeout.
+2. If any live matches have a timeout, there will be a background CPU load
+   (`~0.15%` currently on a modern machine). This load will remain constant
+   regardless of the number of matches done including matches done in parallel.
+3. If no live matches are using a timeout, the background load will remain
+   until the longest deadline (match timeout + the time when the match started)
+   is reached. E.g., if you set a timeout of one minute the load will persist
+   for approximately a minute even if the match finishes quickly.
+
+See [PR #58](https://github.com/dlclark/regexp2/pull/58) for more details and
+alternatives considered.
+
+## Goroutine leak error
+If you're using a library during unit tests (e.g. https://github.com/uber-go/goleak) that validates all goroutines are exited then you'll likely get an error if you or any of your dependencies use regexes with a MatchTimeout.
+To remedy the problem you'll need to tell the unit test to wait until the background timeout goroutine has exited.
+
+```go
+func TestSomething(t *testing.T) {
+	defer goleak.VerifyNone(t)
+	defer regexp2.StopTimeoutClock()
+
+	// ... test
+}
+
+//or
+
+func TestMain(m *testing.M) {
+	// setup
+	// ...
+
+	// run
+	code := m.Run()
+
+	//tear down
+	regexp2.StopTimeoutClock()
+
+	// goleak.Find reports leaked goroutines without needing a *testing.T
+	if err := goleak.Find(); err != nil {
+		log.Fatalf("goroutine leak: %v", err)
+	}
+	os.Exit(code)
+}
+```
+
+This will add ~100ms runtime to each test (or TestMain). If that's too much time you can set the clock cycle rate of the timeout goroutine in an init function in a test file. `regexp2.SetTimeoutCheckPeriod` isn't threadsafe so it must be set up before starting any regexes with timeouts.
+
+```go
+func init() {
+	//speed up testing by making the timeout clock 1ms
+	regexp2.SetTimeoutCheckPeriod(time.Millisecond)
+}
+```
+
+## ECMAScript compatibility mode
+In this mode the engine provides compatibility with the [regex engine](https://tc39.es/ecma262/multipage/text-processing.html#sec-regexp-regular-expression-objects) described in the ECMAScript specification.
+
+Additionally a Unicode mode is provided which allows parsing of `\u{CodePoint}` syntax; that syntax is only valid when both the ECMAScript and Unicode options are provided.
+
+## Library features that I'm still working on
+- Regex split
+
+## Potential bugs
+I've run a battery of tests against regexp2 from various sources and found the debug output matches the .NET engine, but .NET and Go handle strings very differently.
I've attempted to handle these differences, but most of my testing deals with basic ASCII with a little bit of multi-byte Unicode. There's a chance that there are bugs in the string handling related to character sets with supplementary Unicode chars. Right-to-Left support is coded, but not well tested either. + +## Find a bug? +I'm open to new issues and pull requests with tests if you find something odd! diff --git a/vendor/github.com/dlclark/regexp2/fastclock.go b/vendor/github.com/dlclark/regexp2/fastclock.go new file mode 100644 index 0000000000..caf2c9d882 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/fastclock.go @@ -0,0 +1,129 @@ +package regexp2 + +import ( + "sync" + "sync/atomic" + "time" +) + +// fasttime holds a time value (ticks since clock initialization) +type fasttime int64 + +// fastclock provides a fast clock implementation. +// +// A background goroutine periodically stores the current time +// into an atomic variable. +// +// A deadline can be quickly checked for expiration by comparing +// its value to the clock stored in the atomic variable. +// +// The goroutine automatically stops once clockEnd is reached. +// (clockEnd covers the largest deadline seen so far + some +// extra time). This ensures that if regexp2 with timeouts +// stops being used we will stop background work. +type fastclock struct { + // instances of atomicTime must be at the start of the struct (or at least 64-bit aligned) + // otherwise 32-bit architectures will panic + + current atomicTime // Current time (approximate) + clockEnd atomicTime // When clock updater is supposed to stop (>= any existing deadline) + + // current and clockEnd can be read via atomic loads. + // Reads and writes of other fields require mu to be held. + mu sync.Mutex + start time.Time // Time corresponding to fasttime(0) + running bool // Is a clock updater running? +} + +var fast fastclock + +// reached returns true if current time is at or past t. +func (t fasttime) reached() bool { + return fast.current.read() >= t +} + +// makeDeadline returns a time that is approximately time.Now().Add(d) +func makeDeadline(d time.Duration) fasttime { + // Increase the deadline since the clock we are reading may be + // just about to tick forwards. + end := fast.current.read() + durationToTicks(d+clockPeriod) + + // Start or extend clock if necessary. + if end > fast.clockEnd.read() { + extendClock(end) + } + return end +} + +// extendClock ensures that clock is live and will run until at least end. +func extendClock(end fasttime) { + fast.mu.Lock() + defer fast.mu.Unlock() + + if fast.start.IsZero() { + fast.start = time.Now() + } + + // Extend the running time to cover end as well as a bit of slop. 
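+	// (the slop is about one extra second of ticks beyond the largest deadline seen so far)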
+	if shutdown := end + durationToTicks(time.Second); shutdown > fast.clockEnd.read() {
+		fast.clockEnd.write(shutdown)
+	}
+
+	// Start clock if necessary
+	if !fast.running {
+		fast.running = true
+		go runClock()
+	}
+}
+
+// stop the timeout clock in the background
+// should only be used for unit tests to abandon the background goroutine
+func stopClock() {
+	fast.mu.Lock()
+	if fast.running {
+		fast.clockEnd.write(fasttime(0))
+	}
+	fast.mu.Unlock()
+
+	// pause until not running
+	// get and release the lock
+	isRunning := true
+	for isRunning {
+		time.Sleep(clockPeriod / 2)
+		fast.mu.Lock()
+		isRunning = fast.running
+		fast.mu.Unlock()
+	}
+}
+
+func durationToTicks(d time.Duration) fasttime {
+	// Downscale nanoseconds to approximately a millisecond so that we can avoid
+	// overflow even if the caller passes in math.MaxInt64.
+	return fasttime(d) >> 20
+}
+
+const DefaultClockPeriod = 100 * time.Millisecond
+
+// clockPeriod is the approximate interval between updates of the clock value (fast.current).
+var clockPeriod = DefaultClockPeriod
+
+func runClock() {
+	fast.mu.Lock()
+	defer fast.mu.Unlock()
+
+	for fast.current.read() <= fast.clockEnd.read() {
+		// Unlock while sleeping.
+		fast.mu.Unlock()
+		time.Sleep(clockPeriod)
+		fast.mu.Lock()
+
+		newTime := durationToTicks(time.Since(fast.start))
+		fast.current.write(newTime)
+	}
+	fast.running = false
+}
+
+type atomicTime struct{ v int64 } // Should change to atomic.Int64 when we can use go 1.19
+
+func (t *atomicTime) read() fasttime   { return fasttime(atomic.LoadInt64(&t.v)) }
+func (t *atomicTime) write(v fasttime) { atomic.StoreInt64(&t.v, int64(v)) }
diff --git a/vendor/github.com/dlclark/regexp2/match.go b/vendor/github.com/dlclark/regexp2/match.go
new file mode 100644
index 0000000000..759cf8ccf4
--- /dev/null
+++ b/vendor/github.com/dlclark/regexp2/match.go
@@ -0,0 +1,349 @@
+package regexp2
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Match is a single regex result match that contains groups and repeated captures
+//
+// -Groups
+// -Capture
+type Match struct {
+	Group //embedded group 0
+
+	regex       *Regexp
+	otherGroups []Group
+
+	// input to the match
+	textpos   int
+	textstart int
+
+	capcount   int
+	caps       []int
+	sparseCaps map[int]int
+
+	// output from the match
+	matches    [][]int
+	matchcount []int
+
+	// whether we've done any balancing with this match. If we
+	// have done balancing, we'll need to do extra work in Tidy().
+	balancing bool
+}
+
+// Group is an explicit or implicit (group 0) matched group within the pattern
+type Group struct {
+	Capture // the last capture of this group is embedded for ease of use
+
+	Name     string    // group name
+	Captures []Capture // captures of this group
+}
+
+// Capture is a single capture of text within the larger original string
+type Capture struct {
+	// the original string
+	text []rune
+	// Index is the position in the underlying rune slice where the first character of
+	// the captured substring was found. Even if you pass in a string this will be in Runes.
+	Index int
+	// Length is the number of runes in the captured substring.
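+	// (for multi-byte input this differs from the byte length)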
+ Length int +} + +// String returns the captured text as a String +func (c *Capture) String() string { + return string(c.text[c.Index : c.Index+c.Length]) +} + +// Runes returns the captured text as a rune slice +func (c *Capture) Runes() []rune { + return c.text[c.Index : c.Index+c.Length] +} + +func newMatch(regex *Regexp, capcount int, text []rune, startpos int) *Match { + m := Match{ + regex: regex, + matchcount: make([]int, capcount), + matches: make([][]int, capcount), + textstart: startpos, + balancing: false, + } + m.Name = "0" + m.text = text + m.matches[0] = make([]int, 2) + return &m +} + +func newMatchSparse(regex *Regexp, caps map[int]int, capcount int, text []rune, startpos int) *Match { + m := newMatch(regex, capcount, text, startpos) + m.sparseCaps = caps + return m +} + +func (m *Match) reset(text []rune, textstart int) { + m.text = text + m.textstart = textstart + for i := 0; i < len(m.matchcount); i++ { + m.matchcount[i] = 0 + } + m.balancing = false +} + +func (m *Match) tidy(textpos int) { + + interval := m.matches[0] + m.Index = interval[0] + m.Length = interval[1] + m.textpos = textpos + m.capcount = m.matchcount[0] + //copy our root capture to the list + m.Group.Captures = []Capture{m.Group.Capture} + + if m.balancing { + // The idea here is that we want to compact all of our unbalanced captures. To do that we + // use j basically as a count of how many unbalanced captures we have at any given time + // (really j is an index, but j/2 is the count). First we skip past all of the real captures + // until we find a balance captures. Then we check each subsequent entry. If it's a balance + // capture (it's negative), we decrement j. If it's a real capture, we increment j and copy + // it down to the last free position. + for cap := 0; cap < len(m.matchcount); cap++ { + limit := m.matchcount[cap] * 2 + matcharray := m.matches[cap] + + var i, j int + + for i = 0; i < limit; i++ { + if matcharray[i] < 0 { + break + } + } + + for j = i; i < limit; i++ { + if matcharray[i] < 0 { + // skip negative values + j-- + } else { + // but if we find something positive (an actual capture), copy it back to the last + // unbalanced position. + if i != j { + matcharray[j] = matcharray[i] + } + j++ + } + } + + m.matchcount[cap] = j / 2 + } + + m.balancing = false + } +} + +// isMatched tells if a group was matched by capnum +func (m *Match) isMatched(cap int) bool { + return cap < len(m.matchcount) && m.matchcount[cap] > 0 && m.matches[cap][m.matchcount[cap]*2-1] != (-3+1) +} + +// matchIndex returns the index of the last specified matched group by capnum +func (m *Match) matchIndex(cap int) int { + i := m.matches[cap][m.matchcount[cap]*2-2] + if i >= 0 { + return i + } + + return m.matches[cap][-3-i] +} + +// matchLength returns the length of the last specified matched group by capnum +func (m *Match) matchLength(cap int) int { + i := m.matches[cap][m.matchcount[cap]*2-1] + if i >= 0 { + return i + } + + return m.matches[cap][-3-i] +} + +// Nonpublic builder: add a capture to the group specified by "c" +func (m *Match) addMatch(c, start, l int) { + + if m.matches[c] == nil { + m.matches[c] = make([]int, 2) + } + + capcount := m.matchcount[c] + + if capcount*2+2 > len(m.matches[c]) { + oldmatches := m.matches[c] + newmatches := make([]int, capcount*8) + copy(newmatches, oldmatches[:capcount*2]) + m.matches[c] = newmatches + } + + m.matches[c][capcount*2] = start + m.matches[c][capcount*2+1] = l + m.matchcount[c] = capcount + 1 + //log.Printf("addMatch: c=%v, i=%v, l=%v ... 
matches: %v", c, start, l, m.matches) +} + +// Nonpublic builder: Add a capture to balance the specified group. This is used by the +// +// balanced match construct. (?...) +// +// If there were no such thing as backtracking, this would be as simple as calling RemoveMatch(c). +// However, since we have backtracking, we need to keep track of everything. +func (m *Match) balanceMatch(c int) { + m.balancing = true + + // we'll look at the last capture first + capcount := m.matchcount[c] + target := capcount*2 - 2 + + // first see if it is negative, and therefore is a reference to the next available + // capture group for balancing. If it is, we'll reset target to point to that capture. + if m.matches[c][target] < 0 { + target = -3 - m.matches[c][target] + } + + // move back to the previous capture + target -= 2 + + // if the previous capture is a reference, just copy that reference to the end. Otherwise, point to it. + if target >= 0 && m.matches[c][target] < 0 { + m.addMatch(c, m.matches[c][target], m.matches[c][target+1]) + } else { + m.addMatch(c, -3-target, -4-target /* == -3 - (target + 1) */) + } +} + +// Nonpublic builder: removes a group match by capnum +func (m *Match) removeMatch(c int) { + m.matchcount[c]-- +} + +// GroupCount returns the number of groups this match has matched +func (m *Match) GroupCount() int { + return len(m.matchcount) +} + +// GroupByName returns a group based on the name of the group, or nil if the group name does not exist +func (m *Match) GroupByName(name string) *Group { + num := m.regex.GroupNumberFromName(name) + if num < 0 { + return nil + } + return m.GroupByNumber(num) +} + +// GroupByNumber returns a group based on the number of the group, or nil if the group number does not exist +func (m *Match) GroupByNumber(num int) *Group { + // check our sparse map + if m.sparseCaps != nil { + if newNum, ok := m.sparseCaps[num]; ok { + num = newNum + } + } + if num >= len(m.matchcount) || num < 0 { + return nil + } + + if num == 0 { + return &m.Group + } + + m.populateOtherGroups() + + return &m.otherGroups[num-1] +} + +// Groups returns all the capture groups, starting with group 0 (the full match) +func (m *Match) Groups() []Group { + m.populateOtherGroups() + g := make([]Group, len(m.otherGroups)+1) + g[0] = m.Group + copy(g[1:], m.otherGroups) + return g +} + +func (m *Match) populateOtherGroups() { + // Construct all the Group objects first time called + if m.otherGroups == nil { + m.otherGroups = make([]Group, len(m.matchcount)-1) + for i := 0; i < len(m.otherGroups); i++ { + m.otherGroups[i] = newGroup(m.regex.GroupNameFromNumber(i+1), m.text, m.matches[i+1], m.matchcount[i+1]) + } + } +} + +func (m *Match) groupValueAppendToBuf(groupnum int, buf *bytes.Buffer) { + c := m.matchcount[groupnum] + if c == 0 { + return + } + + matches := m.matches[groupnum] + + index := matches[(c-1)*2] + last := index + matches[(c*2)-1] + + for ; index < last; index++ { + buf.WriteRune(m.text[index]) + } +} + +func newGroup(name string, text []rune, caps []int, capcount int) Group { + g := Group{} + g.text = text + if capcount > 0 { + g.Index = caps[(capcount-1)*2] + g.Length = caps[(capcount*2)-1] + } + g.Name = name + g.Captures = make([]Capture, capcount) + for i := 0; i < capcount; i++ { + g.Captures[i] = Capture{ + text: text, + Index: caps[i*2], + Length: caps[i*2+1], + } + } + //log.Printf("newGroup! 
capcount %v, %+v", capcount, g) + + return g +} + +func (m *Match) dump() string { + buf := &bytes.Buffer{} + buf.WriteRune('\n') + if len(m.sparseCaps) > 0 { + for k, v := range m.sparseCaps { + fmt.Fprintf(buf, "Slot %v -> %v\n", k, v) + } + } + + for i, g := range m.Groups() { + fmt.Fprintf(buf, "Group %v (%v), %v caps:\n", i, g.Name, len(g.Captures)) + + for _, c := range g.Captures { + fmt.Fprintf(buf, " (%v, %v) %v\n", c.Index, c.Length, c.String()) + } + } + /* + for i := 0; i < len(m.matchcount); i++ { + fmt.Fprintf(buf, "\nGroup %v (%v):\n", i, m.regex.GroupNameFromNumber(i)) + + for j := 0; j < m.matchcount[i]; j++ { + text := "" + + if m.matches[i][j*2] >= 0 { + start := m.matches[i][j*2] + text = m.text[start : start+m.matches[i][j*2+1]] + } + + fmt.Fprintf(buf, " (%v, %v) %v\n", m.matches[i][j*2], m.matches[i][j*2+1], text) + } + } + */ + return buf.String() +} diff --git a/vendor/github.com/dlclark/regexp2/regexp.go b/vendor/github.com/dlclark/regexp2/regexp.go new file mode 100644 index 0000000000..a7ddbaf358 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/regexp.go @@ -0,0 +1,395 @@ +/* +Package regexp2 is a regexp package that has an interface similar to Go's framework regexp engine but uses a +more feature full regex engine behind the scenes. + +It doesn't have constant time guarantees, but it allows backtracking and is compatible with Perl5 and .NET. +You'll likely be better off with the RE2 engine from the regexp package and should only use this if you +need to write very complex patterns or require compatibility with .NET. +*/ +package regexp2 + +import ( + "errors" + "math" + "strconv" + "sync" + "time" + + "github.com/dlclark/regexp2/syntax" +) + +var ( + // DefaultMatchTimeout used when running regexp matches -- "forever" + DefaultMatchTimeout = time.Duration(math.MaxInt64) + // DefaultUnmarshalOptions used when unmarshaling a regex from text + DefaultUnmarshalOptions = None +) + +// Regexp is the representation of a compiled regular expression. +// A Regexp is safe for concurrent use by multiple goroutines. +type Regexp struct { + // A match will time out if it takes (approximately) more than + // MatchTimeout. This is a safety check in case the match + // encounters catastrophic backtracking. The default value + // (DefaultMatchTimeout) causes all time out checking to be + // suppressed. + MatchTimeout time.Duration + + // read-only after Compile + pattern string // as passed to Compile + options RegexOptions // options + + caps map[int]int // capnum->index + capnames map[string]int //capture group name -> index + capslist []string //sorted list of capture group names + capsize int // size of the capture array + + code *syntax.Code // compiled program + + // cache of machines for running regexp + muRun *sync.Mutex + runner []*runner +} + +// Compile parses a regular expression and returns, if successful, +// a Regexp object that can be used to match against text. 
+func Compile(expr string, opt RegexOptions) (*Regexp, error) {
+	// parse it
+	tree, err := syntax.Parse(expr, syntax.RegexOptions(opt))
+	if err != nil {
+		return nil, err
+	}
+
+	// translate it to code
+	code, err := syntax.Write(tree)
+	if err != nil {
+		return nil, err
+	}
+
+	// return it
+	return &Regexp{
+		pattern:      expr,
+		options:      opt,
+		caps:         code.Caps,
+		capnames:     tree.Capnames,
+		capslist:     tree.Caplist,
+		capsize:      code.Capsize,
+		code:         code,
+		MatchTimeout: DefaultMatchTimeout,
+		muRun:        &sync.Mutex{},
+	}, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled regular
+// expressions.
+func MustCompile(str string, opt RegexOptions) *Regexp {
+	regexp, err := Compile(str, opt)
+	if err != nil {
+		panic(`regexp2: Compile(` + quote(str) + `): ` + err.Error())
+	}
+	return regexp
+}
+
+// Escape adds backslashes to any special characters in the input string
+func Escape(input string) string {
+	return syntax.Escape(input)
+}
+
+// Unescape removes any backslashes from previously-escaped special characters in the input string
+func Unescape(input string) (string, error) {
+	return syntax.Unescape(input)
+}
+
+// SetTimeoutCheckPeriod is a debug function that sets the frequency of the timeout goroutine's sleep cycle.
+// Defaults to 100ms. The only benefit of setting this lower is that the single background goroutine that manages
+// timeouts may exit slightly sooner after all the timeouts have expired. See GitHub issue #63
+func SetTimeoutCheckPeriod(d time.Duration) {
+	clockPeriod = d
+}
+
+// StopTimeoutClock should only be used in unit tests to prevent the timeout clock goroutine
+// from appearing like a leaking goroutine
+func StopTimeoutClock() {
+	stopClock()
+}
+
+// String returns the source text used to compile the regular expression.
+func (re *Regexp) String() string {
+	return re.pattern
+}
+
+func quote(s string) string {
+	if strconv.CanBackquote(s) {
+		return "`" + s + "`"
+	}
+	return strconv.Quote(s)
+}
+
+// RegexOptions impact the runtime and parsing behavior
+// for each specific regex. They are settable in code as well
+// as in the regex pattern itself.
+type RegexOptions int32
+
+const (
+	None                    RegexOptions = 0x0
+	IgnoreCase                           = 0x0001 // "i"
+	Multiline                            = 0x0002 // "m"
+	ExplicitCapture                      = 0x0004 // "n"
+	Compiled                             = 0x0008 // "c"
+	Singleline                           = 0x0010 // "s"
+	IgnorePatternWhitespace              = 0x0020 // "x"
+	RightToLeft                          = 0x0040 // "r"
+	Debug                                = 0x0080 // "d"
+	ECMAScript                           = 0x0100 // "e"
+	RE2                                  = 0x0200 // RE2 (regexp package) compatibility mode
+	Unicode                              = 0x0400 // "u"
+)
+
+func (re *Regexp) RightToLeft() bool {
+	return re.options&RightToLeft != 0
+}
+
+func (re *Regexp) Debug() bool {
+	return re.options&Debug != 0
+}
+
+// Replace searches the input string and replaces each match found with the replacement text.
+// Count will limit the number of matches attempted and startAt will allow
+// us to skip past possible matches at the start of the input (left or right depending on RightToLeft option).
+// Set startAt and count to -1 to go through the whole string +func (re *Regexp) Replace(input, replacement string, startAt, count int) (string, error) { + data, err := syntax.NewReplacerData(replacement, re.caps, re.capsize, re.capnames, syntax.RegexOptions(re.options)) + if err != nil { + return "", err + } + //TODO: cache ReplacerData + + return replace(re, data, nil, input, startAt, count) +} + +// ReplaceFunc searches the input string and replaces each match found using the string from the evaluator +// Count will limit the number of matches attempted and startAt will allow +// us to skip past possible matches at the start of the input (left or right depending on RightToLeft option). +// Set startAt and count to -1 to go through the whole string. +func (re *Regexp) ReplaceFunc(input string, evaluator MatchEvaluator, startAt, count int) (string, error) { + return replace(re, nil, evaluator, input, startAt, count) +} + +// FindStringMatch searches the input string for a Regexp match +func (re *Regexp) FindStringMatch(s string) (*Match, error) { + // convert string to runes + return re.run(false, -1, getRunes(s)) +} + +// FindRunesMatch searches the input rune slice for a Regexp match +func (re *Regexp) FindRunesMatch(r []rune) (*Match, error) { + return re.run(false, -1, r) +} + +// FindStringMatchStartingAt searches the input string for a Regexp match starting at the startAt index +func (re *Regexp) FindStringMatchStartingAt(s string, startAt int) (*Match, error) { + if startAt > len(s) { + return nil, errors.New("startAt must be less than the length of the input string") + } + r, startAt := re.getRunesAndStart(s, startAt) + if startAt == -1 { + // we didn't find our start index in the string -- that's a problem + return nil, errors.New("startAt must align to the start of a valid rune in the input string") + } + + return re.run(false, startAt, r) +} + +// FindRunesMatchStartingAt searches the input rune slice for a Regexp match starting at the startAt index +func (re *Regexp) FindRunesMatchStartingAt(r []rune, startAt int) (*Match, error) { + return re.run(false, startAt, r) +} + +// FindNextMatch returns the next match in the same input string as the match parameter. +// Will return nil if there is no next match or if given a nil match. 
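+// To collect every match, call this in a loop as in the README's regexp2FindAllString example.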
+func (re *Regexp) FindNextMatch(m *Match) (*Match, error) {
+	if m == nil {
+		return nil, nil
+	}
+
+	// If previous match was empty, advance by one before matching to prevent
+	// infinite loop
+	startAt := m.textpos
+	if m.Length == 0 {
+		if m.textpos == len(m.text) {
+			return nil, nil
+		}
+
+		if re.RightToLeft() {
+			startAt--
+		} else {
+			startAt++
+		}
+	}
+	return re.run(false, startAt, m.text)
+}
+
+// MatchString returns true if the string matches the regex
+// error will be set if a timeout occurs
+func (re *Regexp) MatchString(s string) (bool, error) {
+	m, err := re.run(true, -1, getRunes(s))
+	if err != nil {
+		return false, err
+	}
+	return m != nil, nil
+}
+
+func (re *Regexp) getRunesAndStart(s string, startAt int) ([]rune, int) {
+	if startAt < 0 {
+		if re.RightToLeft() {
+			r := getRunes(s)
+			return r, len(r)
+		}
+		return getRunes(s), 0
+	}
+	ret := make([]rune, len(s))
+	i := 0
+	runeIdx := -1
+	for strIdx, r := range s {
+		if strIdx == startAt {
+			runeIdx = i
+		}
+		ret[i] = r
+		i++
+	}
+	if startAt == len(s) {
+		runeIdx = i
+	}
+	return ret[:i], runeIdx
+}
+
+func getRunes(s string) []rune {
+	return []rune(s)
+}
+
+// MatchRunes returns true if the runes match the regex
+// error will be set if a timeout occurs
+func (re *Regexp) MatchRunes(r []rune) (bool, error) {
+	m, err := re.run(true, -1, r)
+	if err != nil {
+		return false, err
+	}
+	return m != nil, nil
+}
+
+// GetGroupNames returns the set of strings used to name capturing groups in the expression.
+func (re *Regexp) GetGroupNames() []string {
+	var result []string
+
+	if re.capslist == nil {
+		result = make([]string, re.capsize)
+
+		for i := 0; i < len(result); i++ {
+			result[i] = strconv.Itoa(i)
+		}
+	} else {
+		result = make([]string, len(re.capslist))
+		copy(result, re.capslist)
+	}
+
+	return result
+}
+
+// GetGroupNumbers returns the integer group numbers used in the expression.
+func (re *Regexp) GetGroupNumbers() []int {
+	var result []int
+
+	if re.caps == nil {
+		result = make([]int, re.capsize)
+
+		for i := 0; i < len(result); i++ {
+			result[i] = i
+		}
+	} else {
+		result = make([]int, len(re.caps))
+
+		for k, v := range re.caps {
+			result[v] = k
+		}
+	}
+
+	return result
+}
+
+// GroupNameFromNumber retrieves a group name that corresponds to a group number.
+// It will return "" for an unknown group number. Unnamed groups automatically
+// receive a name that is the decimal string equivalent of its number.
+func (re *Regexp) GroupNameFromNumber(i int) string {
+	if re.capslist == nil {
+		if i >= 0 && i < re.capsize {
+			return strconv.Itoa(i)
+		}
+
+		return ""
+	}
+
+	if re.caps != nil {
+		var ok bool
+		if i, ok = re.caps[i]; !ok {
+			return ""
+		}
+	}
+
+	if i >= 0 && i < len(re.capslist) {
+		return re.capslist[i]
+	}
+
+	return ""
}
+
+// GroupNumberFromName returns a group number that corresponds to a group name.
+// Returns -1 if the name is not a recognized group name. Numbered groups
+// automatically get a group name that is the decimal string equivalent of its number.
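+// For example, with the pattern `(?<year>\d{4})` the named group "year" is group number 1
+// (named groups are numbered after any unnamed groups, following .NET).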
+func (re *Regexp) GroupNumberFromName(name string) int {
+	// look up name if we have a hashtable of names
+	if re.capnames != nil {
+		if k, ok := re.capnames[name]; ok {
+			return k
+		}
+
+		return -1
+	}
+
+	// convert to an int if it looks like a number
+	result := 0
+	for i := 0; i < len(name); i++ {
+		ch := name[i]
+
+		if ch > '9' || ch < '0' {
+			return -1
+		}
+
+		result *= 10
+		result += int(ch - '0')
+	}
+
+	// return int if it's in range
+	if result >= 0 && result < re.capsize {
+		return result
+	}
+
+	return -1
+}
+
+// MarshalText implements [encoding.TextMarshaler]. The output
+// matches that of calling the [Regexp.String] method.
+func (re *Regexp) MarshalText() ([]byte, error) {
+	return []byte(re.String()), nil
+}
+
+// UnmarshalText implements [encoding.TextUnmarshaler] by calling
+// [Compile] on the encoded value.
+func (re *Regexp) UnmarshalText(text []byte) error {
+	newRE, err := Compile(string(text), DefaultUnmarshalOptions)
+	if err != nil {
+		return err
+	}
+	*re = *newRE
+	return nil
+}
diff --git a/vendor/github.com/dlclark/regexp2/replace.go b/vendor/github.com/dlclark/regexp2/replace.go
new file mode 100644
index 0000000000..0376bd9d37
--- /dev/null
+++ b/vendor/github.com/dlclark/regexp2/replace.go
@@ -0,0 +1,177 @@
+package regexp2
+
+import (
+	"bytes"
+	"errors"
+
+	"github.com/dlclark/regexp2/syntax"
+)
+
+const (
+	replaceSpecials     = 4
+	replaceLeftPortion  = -1
+	replaceRightPortion = -2
+	replaceLastGroup    = -3
+	replaceWholeString  = -4
+)
+
+// MatchEvaluator is a function that takes a match and returns the replacement string to use
+type MatchEvaluator func(Match) string
+
+// Three very similar algorithms appear below: replace (pattern),
+// replace (evaluator), and split.
+
+// replace replaces all occurrences of the regex in the string with the
+// replacement pattern.
+//
+// Note that the special case of no matches is handled on its own:
+// with no matches, the input string is returned unchanged.
+// The right-to-left case is split out because a forward-only buffer
+// doesn't handle right-to-left string building directly very well.
+func replace(regex *Regexp, data *syntax.ReplacerData, evaluator MatchEvaluator, input string, startAt, count int) (string, error) {
+	if count < -1 {
+		return "", errors.New("Count too small")
+	}
+	if count == 0 {
+		return "", nil
+	}
+
+	m, err := regex.FindStringMatchStartingAt(input, startAt)
+
+	if err != nil {
+		return "", err
+	}
+	if m == nil {
+		return input, nil
+	}
+
+	buf := &bytes.Buffer{}
+	text := m.text
+
+	if !regex.RightToLeft() {
+		prevat := 0
+		for m != nil {
+			if m.Index != prevat {
+				buf.WriteString(string(text[prevat:m.Index]))
+			}
+			prevat = m.Index + m.Length
+			if evaluator == nil {
+				replacementImpl(data, buf, m)
+			} else {
+				buf.WriteString(evaluator(*m))
+			}
+
+			count--
+			if count == 0 {
+				break
+			}
+			m, err = regex.FindNextMatch(m)
+			if err != nil {
+				return "", err
+			}
+		}
+
+		if prevat < len(text) {
+			buf.WriteString(string(text[prevat:]))
+		}
+	} else {
+		prevat := len(text)
+		var al []string
+
+		for m != nil {
+			if m.Index+m.Length != prevat {
+				al = append(al, string(text[m.Index+m.Length:prevat]))
+			}
+			prevat = m.Index
+			if evaluator == nil {
+				replacementImplRTL(data, &al, m)
+			} else {
+				al = append(al, evaluator(*m))
+			}
+
+			count--
+			if count == 0 {
+				break
+			}
+			m, err = regex.FindNextMatch(m)
+			if err != nil {
+				return "", err
+			}
+		}
+
+		if prevat > 0 {
+			buf.WriteString(string(text[:prevat]))
+		}
+
+		for i := len(al) - 1; i >= 0; i-- {
+			buf.WriteString(al[i])
+		}
+	}
+
+	return buf.String(), nil
+}
+
+// Given a Match, emits into the buffer the evaluated
+// substitution pattern.
+func replacementImpl(data *syntax.ReplacerData, buf *bytes.Buffer, m *Match) {
+	for _, r := range data.Rules {
+
+		if r >= 0 { // string lookup
+			buf.WriteString(data.Strings[r])
+		} else if r < -replaceSpecials { // group lookup
+			m.groupValueAppendToBuf(-replaceSpecials-1-r, buf)
+		} else {
+			switch -replaceSpecials - 1 - r { // special insertion patterns
+			case replaceLeftPortion:
+				for i := 0; i < m.Index; i++ {
+					buf.WriteRune(m.text[i])
+				}
+			case replaceRightPortion:
+				for i := m.Index + m.Length; i < len(m.text); i++ {
+					buf.WriteRune(m.text[i])
+				}
+			case replaceLastGroup:
+				m.groupValueAppendToBuf(m.GroupCount()-1, buf)
+			case replaceWholeString:
+				for i := 0; i < len(m.text); i++ {
+					buf.WriteRune(m.text[i])
+				}
+			}
+		}
+	}
+}
+
+func replacementImplRTL(data *syntax.ReplacerData, al *[]string, m *Match) {
+	l := *al
+	buf := &bytes.Buffer{}
+
+	for _, r := range data.Rules {
+		buf.Reset()
+		if r >= 0 { // string lookup
+			l = append(l, data.Strings[r])
+		} else if r < -replaceSpecials { // group lookup
+			m.groupValueAppendToBuf(-replaceSpecials-1-r, buf)
+			l = append(l, buf.String())
+		} else {
+			switch -replaceSpecials - 1 - r { // special insertion patterns
+			case replaceLeftPortion:
+				for i := 0; i < m.Index; i++ {
+					buf.WriteRune(m.text[i])
+				}
+			case replaceRightPortion:
+				for i := m.Index + m.Length; i < len(m.text); i++ {
+					buf.WriteRune(m.text[i])
+				}
+			case replaceLastGroup:
+				m.groupValueAppendToBuf(m.GroupCount()-1, buf)
+			case replaceWholeString:
+				for i := 0; i < len(m.text); i++ {
+					buf.WriteRune(m.text[i])
+				}
+			}
+			l = append(l, buf.String())
+		}
+	}
+
+	*al = l
}
diff --git a/vendor/github.com/dlclark/regexp2/runner.go b/vendor/github.com/dlclark/regexp2/runner.go
new file mode 100644
index 0000000000..56759f1474
--- /dev/null
+++ b/vendor/github.com/dlclark/regexp2/runner.go
@@ -0,0 +1,1613 @@
+package regexp2
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
"strings" + "time" + "unicode" + + "github.com/dlclark/regexp2/syntax" +) + +type runner struct { + re *Regexp + code *syntax.Code + + runtextstart int // starting point for search + + runtext []rune // text to search + runtextpos int // current position in text + runtextend int + + // The backtracking stack. Opcodes use this to store data regarding + // what they have matched and where to backtrack to. Each "frame" on + // the stack takes the form of [CodePosition Data1 Data2...], where + // CodePosition is the position of the current opcode and + // the data values are all optional. The CodePosition can be negative, and + // these values (also called "back2") are used by the BranchMark family of opcodes + // to indicate whether they are backtracking after a successful or failed + // match. + // When we backtrack, we pop the CodePosition off the stack, set the current + // instruction pointer to that code position, and mark the opcode + // with a backtracking flag ("Back"). Each opcode then knows how to + // handle its own data. + runtrack []int + runtrackpos int + + // This stack is used to track text positions across different opcodes. + // For example, in /(a*b)+/, the parentheses result in a SetMark/CaptureMark + // pair. SetMark records the text position before we match a*b. Then + // CaptureMark uses that position to figure out where the capture starts. + // Opcodes which push onto this stack are always paired with other opcodes + // which will pop the value from it later. A successful match should mean + // that this stack is empty. + runstack []int + runstackpos int + + // The crawl stack is used to keep track of captures. Every time a group + // has a capture, we push its group number onto the runcrawl stack. In + // the case of a balanced match, we push BOTH groups onto the stack. + runcrawl []int + runcrawlpos int + + runtrackcount int // count of states that may do backtracking + + runmatch *Match // result object + + ignoreTimeout bool + timeout time.Duration // timeout in milliseconds (needed for actual) + deadline fasttime + + operator syntax.InstOp + codepos int + rightToLeft bool + caseInsensitive bool +} + +// run searches for matches and can continue from the previous match +// +// quick is usually false, but can be true to not return matches, just put it in caches +// textstart is -1 to start at the "beginning" (depending on Right-To-Left), otherwise an index in input +// input is the string to search for our regex pattern +func (re *Regexp) run(quick bool, textstart int, input []rune) (*Match, error) { + + // get a cached runner + runner := re.getRunner() + defer re.putRunner(runner) + + if textstart < 0 { + if re.RightToLeft() { + textstart = len(input) + } else { + textstart = 0 + } + } + + return runner.scan(input, textstart, quick, re.MatchTimeout) +} + +// Scans the string to find the first match. Uses the Match object +// both to feed text in and as a place to store matches that come out. +// +// All the action is in the Go() method. Our +// responsibility is to load up the class members before +// calling Go. +// +// The optimizer can compute a set of candidate starting characters, +// and we could use a separate method Skip() that will quickly scan past +// any characters that we know can't match. 
+func (r *runner) scan(rt []rune, textstart int, quick bool, timeout time.Duration) (*Match, error) { + r.timeout = timeout + r.ignoreTimeout = (time.Duration(math.MaxInt64) == timeout) + r.runtextstart = textstart + r.runtext = rt + r.runtextend = len(rt) + + stoppos := r.runtextend + bump := 1 + + if r.re.RightToLeft() { + bump = -1 + stoppos = 0 + } + + r.runtextpos = textstart + initted := false + + r.startTimeoutWatch() + for { + if r.re.Debug() { + //fmt.Printf("\nSearch content: %v\n", string(r.runtext)) + fmt.Printf("\nSearch range: from 0 to %v\n", r.runtextend) + fmt.Printf("Firstchar search starting at %v stopping at %v\n", r.runtextpos, stoppos) + } + + if r.findFirstChar() { + if err := r.checkTimeout(); err != nil { + return nil, err + } + + if !initted { + r.initMatch() + initted = true + } + + if r.re.Debug() { + fmt.Printf("Executing engine starting at %v\n\n", r.runtextpos) + } + + if err := r.execute(); err != nil { + return nil, err + } + + if r.runmatch.matchcount[0] > 0 { + // We'll return a match even if it touches a previous empty match + return r.tidyMatch(quick), nil + } + + // reset state for another go + r.runtrackpos = len(r.runtrack) + r.runstackpos = len(r.runstack) + r.runcrawlpos = len(r.runcrawl) + } + + // failure! + + if r.runtextpos == stoppos { + r.tidyMatch(true) + return nil, nil + } + + // Recognize leading []* and various anchors, and bump on failure accordingly + + // r.bump by one and start again + + r.runtextpos += bump + } + // We never get here +} + +func (r *runner) execute() error { + + r.goTo(0) + + for { + + if r.re.Debug() { + r.dumpState() + } + + if err := r.checkTimeout(); err != nil { + return err + } + + switch r.operator { + case syntax.Stop: + return nil + + case syntax.Nothing: + break + + case syntax.Goto: + r.goTo(r.operand(0)) + continue + + case syntax.Testref: + if !r.runmatch.isMatched(r.operand(0)) { + break + } + r.advance(1) + continue + + case syntax.Lazybranch: + r.trackPush1(r.textPos()) + r.advance(1) + continue + + case syntax.Lazybranch | syntax.Back: + r.trackPop() + r.textto(r.trackPeek()) + r.goTo(r.operand(0)) + continue + + case syntax.Setmark: + r.stackPush(r.textPos()) + r.trackPush() + r.advance(0) + continue + + case syntax.Nullmark: + r.stackPush(-1) + r.trackPush() + r.advance(0) + continue + + case syntax.Setmark | syntax.Back, syntax.Nullmark | syntax.Back: + r.stackPop() + break + + case syntax.Getmark: + r.stackPop() + r.trackPush1(r.stackPeek()) + r.textto(r.stackPeek()) + r.advance(0) + continue + + case syntax.Getmark | syntax.Back: + r.trackPop() + r.stackPush(r.trackPeek()) + break + + case syntax.Capturemark: + if r.operand(1) != -1 && !r.runmatch.isMatched(r.operand(1)) { + break + } + r.stackPop() + if r.operand(1) != -1 { + r.transferCapture(r.operand(0), r.operand(1), r.stackPeek(), r.textPos()) + } else { + r.capture(r.operand(0), r.stackPeek(), r.textPos()) + } + r.trackPush1(r.stackPeek()) + + r.advance(2) + + continue + + case syntax.Capturemark | syntax.Back: + r.trackPop() + r.stackPush(r.trackPeek()) + r.uncapture() + if r.operand(0) != -1 && r.operand(1) != -1 { + r.uncapture() + } + + break + + case syntax.Branchmark: + r.stackPop() + + matched := r.textPos() - r.stackPeek() + + if matched != 0 { // Nonempty match -> loop now + r.trackPush2(r.stackPeek(), r.textPos()) // Save old mark, textpos + r.stackPush(r.textPos()) // Make new mark + r.goTo(r.operand(0)) // Loop + } else { // Empty match -> straight now + r.trackPushNeg1(r.stackPeek()) // Save old mark + r.advance(1) // 
Straight + } + continue + + case syntax.Branchmark | syntax.Back: + r.trackPopN(2) + r.stackPop() + r.textto(r.trackPeekN(1)) // Recall position + r.trackPushNeg1(r.trackPeek()) // Save old mark + r.advance(1) // Straight + continue + + case syntax.Branchmark | syntax.Back2: + r.trackPop() + r.stackPush(r.trackPeek()) // Recall old mark + break // Backtrack + + case syntax.Lazybranchmark: + { + // We hit this the first time through a lazy loop and after each + // successful match of the inner expression. It simply continues + // on and doesn't loop. + r.stackPop() + + oldMarkPos := r.stackPeek() + + if r.textPos() != oldMarkPos { // Nonempty match -> try to loop again by going to 'back' state + if oldMarkPos != -1 { + r.trackPush2(oldMarkPos, r.textPos()) // Save old mark, textpos + } else { + r.trackPush2(r.textPos(), r.textPos()) + } + } else { + // The inner expression found an empty match, so we'll go directly to 'back2' if we + // backtrack. In this case, we need to push something on the stack, since back2 pops. + // However, in the case of ()+? or similar, this empty match may be legitimate, so push the text + // position associated with that empty match. + r.stackPush(oldMarkPos) + + r.trackPushNeg1(r.stackPeek()) // Save old mark + } + r.advance(1) + continue + } + + case syntax.Lazybranchmark | syntax.Back: + + // After the first time, Lazybranchmark | syntax.Back occurs + // with each iteration of the loop, and therefore with every attempted + // match of the inner expression. We'll try to match the inner expression, + // then go back to Lazybranchmark if successful. If the inner expression + // fails, we go to Lazybranchmark | syntax.Back2 + + r.trackPopN(2) + pos := r.trackPeekN(1) + r.trackPushNeg1(r.trackPeek()) // Save old mark + r.stackPush(pos) // Make new mark + r.textto(pos) // Recall position + r.goTo(r.operand(0)) // Loop + continue + + case syntax.Lazybranchmark | syntax.Back2: + // The lazy loop has failed. We'll do a true backtrack and + // start over before the lazy loop. 
+ r.stackPop() + r.trackPop() + r.stackPush(r.trackPeek()) // Recall old mark + break + + case syntax.Setcount: + r.stackPush2(r.textPos(), r.operand(0)) + r.trackPush() + r.advance(1) + continue + + case syntax.Nullcount: + r.stackPush2(-1, r.operand(0)) + r.trackPush() + r.advance(1) + continue + + case syntax.Setcount | syntax.Back: + r.stackPopN(2) + break + + case syntax.Nullcount | syntax.Back: + r.stackPopN(2) + break + + case syntax.Branchcount: + // r.stackPush: + // 0: Mark + // 1: Count + + r.stackPopN(2) + mark := r.stackPeek() + count := r.stackPeekN(1) + matched := r.textPos() - mark + + if count >= r.operand(1) || (matched == 0 && count >= 0) { // Max loops or empty match -> straight now + r.trackPushNeg2(mark, count) // Save old mark, count + r.advance(2) // Straight + } else { // Nonempty match -> count+loop now + r.trackPush1(mark) // remember mark + r.stackPush2(r.textPos(), count+1) // Make new mark, incr count + r.goTo(r.operand(0)) // Loop + } + continue + + case syntax.Branchcount | syntax.Back: + // r.trackPush: + // 0: Previous mark + // r.stackPush: + // 0: Mark (= current pos, discarded) + // 1: Count + r.trackPop() + r.stackPopN(2) + if r.stackPeekN(1) > 0 { // Positive -> can go straight + r.textto(r.stackPeek()) // Zap to mark + r.trackPushNeg2(r.trackPeek(), r.stackPeekN(1)-1) // Save old mark, old count + r.advance(2) // Straight + continue + } + r.stackPush2(r.trackPeek(), r.stackPeekN(1)-1) // recall old mark, old count + break + + case syntax.Branchcount | syntax.Back2: + // r.trackPush: + // 0: Previous mark + // 1: Previous count + r.trackPopN(2) + r.stackPush2(r.trackPeek(), r.trackPeekN(1)) // Recall old mark, old count + break // Backtrack + + case syntax.Lazybranchcount: + // r.stackPush: + // 0: Mark + // 1: Count + + r.stackPopN(2) + mark := r.stackPeek() + count := r.stackPeekN(1) + + if count < 0 { // Negative count -> loop now + r.trackPushNeg1(mark) // Save old mark + r.stackPush2(r.textPos(), count+1) // Make new mark, incr count + r.goTo(r.operand(0)) // Loop + } else { // Nonneg count -> straight now + r.trackPush3(mark, count, r.textPos()) // Save mark, count, position + r.advance(2) // Straight + } + continue + + case syntax.Lazybranchcount | syntax.Back: + // r.trackPush: + // 0: Mark + // 1: Count + // 2: r.textPos + + r.trackPopN(3) + mark := r.trackPeek() + textpos := r.trackPeekN(2) + + if r.trackPeekN(1) < r.operand(1) && textpos != mark { // Under limit and not empty match -> loop + r.textto(textpos) // Recall position + r.stackPush2(textpos, r.trackPeekN(1)+1) // Make new mark, incr count + r.trackPushNeg1(mark) // Save old mark + r.goTo(r.operand(0)) // Loop + continue + } else { // Max loops or empty match -> backtrack + r.stackPush2(r.trackPeek(), r.trackPeekN(1)) // Recall old mark, count + break // backtrack + } + + case syntax.Lazybranchcount | syntax.Back2: + // r.trackPush: + // 0: Previous mark + // r.stackPush: + // 0: Mark (== current pos, discarded) + // 1: Count + r.trackPop() + r.stackPopN(2) + r.stackPush2(r.trackPeek(), r.stackPeekN(1)-1) // Recall old mark, count + break // Backtrack + + case syntax.Setjump: + r.stackPush2(r.trackpos(), r.crawlpos()) + r.trackPush() + r.advance(0) + continue + + case syntax.Setjump | syntax.Back: + r.stackPopN(2) + break + + case syntax.Backjump: + // r.stackPush: + // 0: Saved trackpos + // 1: r.crawlpos + r.stackPopN(2) + r.trackto(r.stackPeek()) + + for r.crawlpos() != r.stackPeekN(1) { + r.uncapture() + } + + break + + case syntax.Forejump: + // r.stackPush: + // 0: Saved 
trackpos + // 1: r.crawlpos + r.stackPopN(2) + r.trackto(r.stackPeek()) + r.trackPush1(r.stackPeekN(1)) + r.advance(0) + continue + + case syntax.Forejump | syntax.Back: + // r.trackPush: + // 0: r.crawlpos + r.trackPop() + + for r.crawlpos() != r.trackPeek() { + r.uncapture() + } + + break + + case syntax.Bol: + if r.leftchars() > 0 && r.charAt(r.textPos()-1) != '\n' { + break + } + r.advance(0) + continue + + case syntax.Eol: + if r.rightchars() > 0 && r.charAt(r.textPos()) != '\n' { + break + } + r.advance(0) + continue + + case syntax.Boundary: + if !r.isBoundary(r.textPos(), 0, r.runtextend) { + break + } + r.advance(0) + continue + + case syntax.Nonboundary: + if r.isBoundary(r.textPos(), 0, r.runtextend) { + break + } + r.advance(0) + continue + + case syntax.ECMABoundary: + if !r.isECMABoundary(r.textPos(), 0, r.runtextend) { + break + } + r.advance(0) + continue + + case syntax.NonECMABoundary: + if r.isECMABoundary(r.textPos(), 0, r.runtextend) { + break + } + r.advance(0) + continue + + case syntax.Beginning: + if r.leftchars() > 0 { + break + } + r.advance(0) + continue + + case syntax.Start: + if r.textPos() != r.textstart() { + break + } + r.advance(0) + continue + + case syntax.EndZ: + rchars := r.rightchars() + if rchars > 1 { + break + } + // RE2 and EcmaScript define $ as "asserts position at the end of the string" + // PCRE/.NET adds "or before the line terminator right at the end of the string (if any)" + if (r.re.options & (RE2 | ECMAScript)) != 0 { + // RE2/Ecmascript mode + if rchars > 0 { + break + } + } else if rchars == 1 && r.charAt(r.textPos()) != '\n' { + // "regular" mode + break + } + + r.advance(0) + continue + + case syntax.End: + if r.rightchars() > 0 { + break + } + r.advance(0) + continue + + case syntax.One: + if r.forwardchars() < 1 || r.forwardcharnext() != rune(r.operand(0)) { + break + } + + r.advance(1) + continue + + case syntax.Notone: + if r.forwardchars() < 1 || r.forwardcharnext() == rune(r.operand(0)) { + break + } + + r.advance(1) + continue + + case syntax.Set: + + if r.forwardchars() < 1 || !r.code.Sets[r.operand(0)].CharIn(r.forwardcharnext()) { + break + } + + r.advance(1) + continue + + case syntax.Multi: + if !r.runematch(r.code.Strings[r.operand(0)]) { + break + } + + r.advance(1) + continue + + case syntax.Ref: + + capnum := r.operand(0) + + if r.runmatch.isMatched(capnum) { + if !r.refmatch(r.runmatch.matchIndex(capnum), r.runmatch.matchLength(capnum)) { + break + } + } else { + if (r.re.options & ECMAScript) == 0 { + break + } + } + + r.advance(1) + continue + + case syntax.Onerep: + + c := r.operand(1) + + if r.forwardchars() < c { + break + } + + ch := rune(r.operand(0)) + + for c > 0 { + if r.forwardcharnext() != ch { + goto BreakBackward + } + c-- + } + + r.advance(2) + continue + + case syntax.Notonerep: + + c := r.operand(1) + + if r.forwardchars() < c { + break + } + ch := rune(r.operand(0)) + + for c > 0 { + if r.forwardcharnext() == ch { + goto BreakBackward + } + c-- + } + + r.advance(2) + continue + + case syntax.Setrep: + + c := r.operand(1) + + if r.forwardchars() < c { + break + } + + set := r.code.Sets[r.operand(0)] + + for c > 0 { + if !set.CharIn(r.forwardcharnext()) { + goto BreakBackward + } + c-- + } + + r.advance(2) + continue + + case syntax.Oneloop: + + c := r.operand(1) + + if c > r.forwardchars() { + c = r.forwardchars() + } + + ch := rune(r.operand(0)) + i := c + + for ; i > 0; i-- { + if r.forwardcharnext() != ch { + r.backwardnext() + break + } + } + + if c > i { + r.trackPush2(c-i-1, 
r.textPos()-r.bump()) + } + + r.advance(2) + continue + + case syntax.Notoneloop: + + c := r.operand(1) + + if c > r.forwardchars() { + c = r.forwardchars() + } + + ch := rune(r.operand(0)) + i := c + + for ; i > 0; i-- { + if r.forwardcharnext() == ch { + r.backwardnext() + break + } + } + + if c > i { + r.trackPush2(c-i-1, r.textPos()-r.bump()) + } + + r.advance(2) + continue + + case syntax.Setloop: + + c := r.operand(1) + + if c > r.forwardchars() { + c = r.forwardchars() + } + + set := r.code.Sets[r.operand(0)] + i := c + + for ; i > 0; i-- { + if !set.CharIn(r.forwardcharnext()) { + r.backwardnext() + break + } + } + + if c > i { + r.trackPush2(c-i-1, r.textPos()-r.bump()) + } + + r.advance(2) + continue + + case syntax.Oneloop | syntax.Back, syntax.Notoneloop | syntax.Back: + + r.trackPopN(2) + i := r.trackPeek() + pos := r.trackPeekN(1) + + r.textto(pos) + + if i > 0 { + r.trackPush2(i-1, pos-r.bump()) + } + + r.advance(2) + continue + + case syntax.Setloop | syntax.Back: + + r.trackPopN(2) + i := r.trackPeek() + pos := r.trackPeekN(1) + + r.textto(pos) + + if i > 0 { + r.trackPush2(i-1, pos-r.bump()) + } + + r.advance(2) + continue + + case syntax.Onelazy, syntax.Notonelazy: + + c := r.operand(1) + + if c > r.forwardchars() { + c = r.forwardchars() + } + + if c > 0 { + r.trackPush2(c-1, r.textPos()) + } + + r.advance(2) + continue + + case syntax.Setlazy: + + c := r.operand(1) + + if c > r.forwardchars() { + c = r.forwardchars() + } + + if c > 0 { + r.trackPush2(c-1, r.textPos()) + } + + r.advance(2) + continue + + case syntax.Onelazy | syntax.Back: + + r.trackPopN(2) + pos := r.trackPeekN(1) + r.textto(pos) + + if r.forwardcharnext() != rune(r.operand(0)) { + break + } + + i := r.trackPeek() + + if i > 0 { + r.trackPush2(i-1, pos+r.bump()) + } + + r.advance(2) + continue + + case syntax.Notonelazy | syntax.Back: + + r.trackPopN(2) + pos := r.trackPeekN(1) + r.textto(pos) + + if r.forwardcharnext() == rune(r.operand(0)) { + break + } + + i := r.trackPeek() + + if i > 0 { + r.trackPush2(i-1, pos+r.bump()) + } + + r.advance(2) + continue + + case syntax.Setlazy | syntax.Back: + + r.trackPopN(2) + pos := r.trackPeekN(1) + r.textto(pos) + + if !r.code.Sets[r.operand(0)].CharIn(r.forwardcharnext()) { + break + } + + i := r.trackPeek() + + if i > 0 { + r.trackPush2(i-1, pos+r.bump()) + } + + r.advance(2) + continue + + default: + return errors.New("unknown state in regex runner") + } + + BreakBackward: + ; + + // "break Backward" comes here: + r.backtrack() + } +} + +// increase the size of stack and track storage +func (r *runner) ensureStorage() { + if r.runstackpos < r.runtrackcount*4 { + doubleIntSlice(&r.runstack, &r.runstackpos) + } + if r.runtrackpos < r.runtrackcount*4 { + doubleIntSlice(&r.runtrack, &r.runtrackpos) + } +} + +func doubleIntSlice(s *[]int, pos *int) { + oldLen := len(*s) + newS := make([]int, oldLen*2) + + copy(newS[oldLen:], *s) + *pos += oldLen + *s = newS +} + +// Save a number on the longjump unrolling stack +func (r *runner) crawl(i int) { + if r.runcrawlpos == 0 { + doubleIntSlice(&r.runcrawl, &r.runcrawlpos) + } + r.runcrawlpos-- + r.runcrawl[r.runcrawlpos] = i +} + +// Remove a number from the longjump unrolling stack +func (r *runner) popcrawl() int { + val := r.runcrawl[r.runcrawlpos] + r.runcrawlpos++ + return val +} + +// Get the height of the stack +func (r *runner) crawlpos() int { + return len(r.runcrawl) - r.runcrawlpos +} + +func (r *runner) advance(i int) { + r.codepos += (i + 1) + r.setOperator(r.code.Codes[r.codepos]) +} + +func (r *runner) 
goTo(newpos int) { + // when branching backward or in place, ensure storage + if newpos <= r.codepos { + r.ensureStorage() + } + + r.setOperator(r.code.Codes[newpos]) + r.codepos = newpos +} + +func (r *runner) textto(newpos int) { + r.runtextpos = newpos +} + +func (r *runner) trackto(newpos int) { + r.runtrackpos = len(r.runtrack) - newpos +} + +func (r *runner) textstart() int { + return r.runtextstart +} + +func (r *runner) textPos() int { + return r.runtextpos +} + +// push onto the backtracking stack +func (r *runner) trackpos() int { + return len(r.runtrack) - r.runtrackpos +} + +func (r *runner) trackPush() { + r.runtrackpos-- + r.runtrack[r.runtrackpos] = r.codepos +} + +func (r *runner) trackPush1(I1 int) { + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I1 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = r.codepos +} + +func (r *runner) trackPush2(I1, I2 int) { + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I1 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I2 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = r.codepos +} + +func (r *runner) trackPush3(I1, I2, I3 int) { + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I1 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I2 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I3 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = r.codepos +} + +func (r *runner) trackPushNeg1(I1 int) { + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I1 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = -r.codepos +} + +func (r *runner) trackPushNeg2(I1, I2 int) { + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I1 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I2 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = -r.codepos +} + +func (r *runner) backtrack() { + newpos := r.runtrack[r.runtrackpos] + r.runtrackpos++ + + if r.re.Debug() { + if newpos < 0 { + fmt.Printf(" Backtracking (back2) to code position %v\n", -newpos) + } else { + fmt.Printf(" Backtracking to code position %v\n", newpos) + } + } + + if newpos < 0 { + newpos = -newpos + r.setOperator(r.code.Codes[newpos] | syntax.Back2) + } else { + r.setOperator(r.code.Codes[newpos] | syntax.Back) + } + + // When branching backward, ensure storage + if newpos < r.codepos { + r.ensureStorage() + } + + r.codepos = newpos +} + +func (r *runner) setOperator(op int) { + r.caseInsensitive = (0 != (op & syntax.Ci)) + r.rightToLeft = (0 != (op & syntax.Rtl)) + r.operator = syntax.InstOp(op & ^(syntax.Rtl | syntax.Ci)) +} + +func (r *runner) trackPop() { + r.runtrackpos++ +} + +// pop framesize items from the backtracking stack +func (r *runner) trackPopN(framesize int) { + r.runtrackpos += framesize +} + +// Technically we are actually peeking at items already popped. So if you want to +// get and pop the top item from the stack, you do +// r.trackPop(); +// r.trackPeek(); +func (r *runner) trackPeek() int { + return r.runtrack[r.runtrackpos-1] +} + +// get the ith element down on the backtracking stack +func (r *runner) trackPeekN(i int) int { + return r.runtrack[r.runtrackpos-i-1] +} + +// Push onto the grouping stack +func (r *runner) stackPush(I1 int) { + r.runstackpos-- + r.runstack[r.runstackpos] = I1 +} + +func (r *runner) stackPush2(I1, I2 int) { + r.runstackpos-- + r.runstack[r.runstackpos] = I1 + r.runstackpos-- + r.runstack[r.runstackpos] = I2 +} + +func (r *runner) stackPop() { + r.runstackpos++ +} + +// pop framesize items from the grouping stack +func (r *runner) stackPopN(framesize int) { + r.runstackpos += framesize +} + +// Technically we are actually peeking at items already popped. 
So if you want to +// get and pop the top item from the stack, you do +// r.stackPop(); +// r.stackPeek(); +func (r *runner) stackPeek() int { + return r.runstack[r.runstackpos-1] +} + +// get the ith element down on the grouping stack +func (r *runner) stackPeekN(i int) int { + return r.runstack[r.runstackpos-i-1] +} + +func (r *runner) operand(i int) int { + return r.code.Codes[r.codepos+i+1] +} + +func (r *runner) leftchars() int { + return r.runtextpos +} + +func (r *runner) rightchars() int { + return r.runtextend - r.runtextpos +} + +func (r *runner) bump() int { + if r.rightToLeft { + return -1 + } + return 1 +} + +func (r *runner) forwardchars() int { + if r.rightToLeft { + return r.runtextpos + } + return r.runtextend - r.runtextpos +} + +func (r *runner) forwardcharnext() rune { + var ch rune + if r.rightToLeft { + r.runtextpos-- + ch = r.runtext[r.runtextpos] + } else { + ch = r.runtext[r.runtextpos] + r.runtextpos++ + } + + if r.caseInsensitive { + return unicode.ToLower(ch) + } + return ch +} + +func (r *runner) runematch(str []rune) bool { + var pos int + + c := len(str) + if !r.rightToLeft { + if r.runtextend-r.runtextpos < c { + return false + } + + pos = r.runtextpos + c + } else { + if r.runtextpos-0 < c { + return false + } + + pos = r.runtextpos + } + + if !r.caseInsensitive { + for c != 0 { + c-- + pos-- + if str[c] != r.runtext[pos] { + return false + } + } + } else { + for c != 0 { + c-- + pos-- + if str[c] != unicode.ToLower(r.runtext[pos]) { + return false + } + } + } + + if !r.rightToLeft { + pos += len(str) + } + + r.runtextpos = pos + + return true +} + +func (r *runner) refmatch(index, len int) bool { + var c, pos, cmpos int + + if !r.rightToLeft { + if r.runtextend-r.runtextpos < len { + return false + } + + pos = r.runtextpos + len + } else { + if r.runtextpos-0 < len { + return false + } + + pos = r.runtextpos + } + cmpos = index + len + + c = len + + if !r.caseInsensitive { + for c != 0 { + c-- + cmpos-- + pos-- + if r.runtext[cmpos] != r.runtext[pos] { + return false + } + + } + } else { + for c != 0 { + c-- + cmpos-- + pos-- + + if unicode.ToLower(r.runtext[cmpos]) != unicode.ToLower(r.runtext[pos]) { + return false + } + } + } + + if !r.rightToLeft { + pos += len + } + + r.runtextpos = pos + + return true +} + +func (r *runner) backwardnext() { + if r.rightToLeft { + r.runtextpos++ + } else { + r.runtextpos-- + } +} + +func (r *runner) charAt(j int) rune { + return r.runtext[j] +} + +func (r *runner) findFirstChar() bool { + + if 0 != (r.code.Anchors & (syntax.AnchorBeginning | syntax.AnchorStart | syntax.AnchorEndZ | syntax.AnchorEnd)) { + if !r.code.RightToLeft { + if (0 != (r.code.Anchors&syntax.AnchorBeginning) && r.runtextpos > 0) || + (0 != (r.code.Anchors&syntax.AnchorStart) && r.runtextpos > r.runtextstart) { + r.runtextpos = r.runtextend + return false + } + if 0 != (r.code.Anchors&syntax.AnchorEndZ) && r.runtextpos < r.runtextend-1 { + r.runtextpos = r.runtextend - 1 + } else if 0 != (r.code.Anchors&syntax.AnchorEnd) && r.runtextpos < r.runtextend { + r.runtextpos = r.runtextend + } + } else { + if (0 != (r.code.Anchors&syntax.AnchorEnd) && r.runtextpos < r.runtextend) || + (0 != (r.code.Anchors&syntax.AnchorEndZ) && (r.runtextpos < r.runtextend-1 || + (r.runtextpos == r.runtextend-1 && r.charAt(r.runtextpos) != '\n'))) || + (0 != (r.code.Anchors&syntax.AnchorStart) && r.runtextpos < r.runtextstart) { + r.runtextpos = 0 + return false + } + if 0 != (r.code.Anchors&syntax.AnchorBeginning) && r.runtextpos > 0 { + r.runtextpos = 0 + } + } + + if 
r.code.BmPrefix != nil { + return r.code.BmPrefix.IsMatch(r.runtext, r.runtextpos, 0, r.runtextend) + } + + return true // found a valid start or end anchor + } else if r.code.BmPrefix != nil { + r.runtextpos = r.code.BmPrefix.Scan(r.runtext, r.runtextpos, 0, r.runtextend) + + if r.runtextpos == -1 { + if r.code.RightToLeft { + r.runtextpos = 0 + } else { + r.runtextpos = r.runtextend + } + return false + } + + return true + } else if r.code.FcPrefix == nil { + return true + } + + r.rightToLeft = r.code.RightToLeft + r.caseInsensitive = r.code.FcPrefix.CaseInsensitive + + set := r.code.FcPrefix.PrefixSet + if set.IsSingleton() { + ch := set.SingletonChar() + for i := r.forwardchars(); i > 0; i-- { + if ch == r.forwardcharnext() { + r.backwardnext() + return true + } + } + } else { + for i := r.forwardchars(); i > 0; i-- { + n := r.forwardcharnext() + //fmt.Printf("%v in %v: %v\n", string(n), set.String(), set.CharIn(n)) + if set.CharIn(n) { + r.backwardnext() + return true + } + } + } + + return false +} + +func (r *runner) initMatch() { + // Use a hashtable'ed Match object if the capture numbers are sparse + + if r.runmatch == nil { + if r.re.caps != nil { + r.runmatch = newMatchSparse(r.re, r.re.caps, r.re.capsize, r.runtext, r.runtextstart) + } else { + r.runmatch = newMatch(r.re, r.re.capsize, r.runtext, r.runtextstart) + } + } else { + r.runmatch.reset(r.runtext, r.runtextstart) + } + + // note we test runcrawl, because it is the last one to be allocated + // If there is an alloc failure in the middle of the three allocations, + // we may still return to reuse this instance, and we want to behave + // as if the allocations didn't occur. (we used to test _trackcount != 0) + + if r.runcrawl != nil { + r.runtrackpos = len(r.runtrack) + r.runstackpos = len(r.runstack) + r.runcrawlpos = len(r.runcrawl) + return + } + + r.initTrackCount() + + tracksize := r.runtrackcount * 8 + stacksize := r.runtrackcount * 8 + + if tracksize < 32 { + tracksize = 32 + } + if stacksize < 16 { + stacksize = 16 + } + + r.runtrack = make([]int, tracksize) + r.runtrackpos = tracksize + + r.runstack = make([]int, stacksize) + r.runstackpos = stacksize + + r.runcrawl = make([]int, 32) + r.runcrawlpos = 32 +} + +func (r *runner) tidyMatch(quick bool) *Match { + if !quick { + match := r.runmatch + + r.runmatch = nil + + match.tidy(r.runtextpos) + return match + } else { + // send back our match -- it's not leaving the package, so it's safe to not clean it up + // this reduces allocs for frequent calls to the "IsMatch" bool-only functions + return r.runmatch + } +} + +// capture captures a subexpression. Note that the +// capnum used here has already been mapped to a non-sparse +// index (by the code generator RegexWriter). +func (r *runner) capture(capnum, start, end int) { + if end < start { + T := end + end = start + start = T + } + + r.crawl(capnum) + r.runmatch.addMatch(capnum, start, end-start) +} + +// transferCapture captures a subexpression. Note that the +// capnum used here has already been mapped to a non-sparse +// index (by the code generator RegexWriter). 
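+//
+// For a balancing group like (?<close-open>...), the result is the innermost
+// defined interval: e.g. if group `open` captured [2,5) and the new span is
+// [7,9), then start >= end2 below, so the capture recorded for `close`
+// becomes [5,7) -- the text between the old capture and the current one.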
+func (r *runner) transferCapture(capnum, uncapnum, start, end int) { + var start2, end2 int + + // these are the two intervals that are cancelling each other + + if end < start { + T := end + end = start + start = T + } + + start2 = r.runmatch.matchIndex(uncapnum) + end2 = start2 + r.runmatch.matchLength(uncapnum) + + // The new capture gets the innermost defined interval + + if start >= end2 { + end = start + start = end2 + } else if end <= start2 { + start = start2 + } else { + if end > end2 { + end = end2 + } + if start2 > start { + start = start2 + } + } + + r.crawl(uncapnum) + r.runmatch.balanceMatch(uncapnum) + + if capnum != -1 { + r.crawl(capnum) + r.runmatch.addMatch(capnum, start, end-start) + } +} + +// revert the last capture +func (r *runner) uncapture() { + capnum := r.popcrawl() + r.runmatch.removeMatch(capnum) +} + +//debug + +func (r *runner) dumpState() { + back := "" + if r.operator&syntax.Back != 0 { + back = " Back" + } + if r.operator&syntax.Back2 != 0 { + back += " Back2" + } + fmt.Printf("Text: %v\nTrack: %v\nStack: %v\n %s%s\n\n", + r.textposDescription(), + r.stackDescription(r.runtrack, r.runtrackpos), + r.stackDescription(r.runstack, r.runstackpos), + r.code.OpcodeDescription(r.codepos), + back) +} + +func (r *runner) stackDescription(a []int, index int) string { + buf := &bytes.Buffer{} + + fmt.Fprintf(buf, "%v/%v", len(a)-index, len(a)) + if buf.Len() < 8 { + buf.WriteString(strings.Repeat(" ", 8-buf.Len())) + } + + buf.WriteRune('(') + for i := index; i < len(a); i++ { + if i > index { + buf.WriteRune(' ') + } + + buf.WriteString(strconv.Itoa(a[i])) + } + + buf.WriteRune(')') + + return buf.String() +} + +func (r *runner) textposDescription() string { + buf := &bytes.Buffer{} + + buf.WriteString(strconv.Itoa(r.runtextpos)) + + if buf.Len() < 8 { + buf.WriteString(strings.Repeat(" ", 8-buf.Len())) + } + + if r.runtextpos > 0 { + buf.WriteString(syntax.CharDescription(r.runtext[r.runtextpos-1])) + } else { + buf.WriteRune('^') + } + + buf.WriteRune('>') + + for i := r.runtextpos; i < r.runtextend; i++ { + buf.WriteString(syntax.CharDescription(r.runtext[i])) + } + if buf.Len() >= 64 { + buf.Truncate(61) + buf.WriteString("...") + } else { + buf.WriteRune('$') + } + + return buf.String() +} + +// decide whether the pos +// at the specified index is a boundary or not. It's just not worth +// emitting inline code for this logic. 
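+//
+// A position is a boundary exactly when word-char-ness differs across it:
+// e.g. in "a b", index 1 is a boundary ('a' is a word rune, ' ' is not),
+// while index 1 of "ab" is not (both neighbours are word runes).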
+func (r *runner) isBoundary(index, startpos, endpos int) bool {
+	return (index > startpos && syntax.IsWordChar(r.runtext[index-1])) !=
+		(index < endpos && syntax.IsWordChar(r.runtext[index]))
+}
+
+func (r *runner) isECMABoundary(index, startpos, endpos int) bool {
+	return (index > startpos && syntax.IsECMAWordChar(r.runtext[index-1])) !=
+		(index < endpos && syntax.IsECMAWordChar(r.runtext[index]))
+}
+
+func (r *runner) startTimeoutWatch() {
+	if r.ignoreTimeout {
+		return
+	}
+	r.deadline = makeDeadline(r.timeout)
+}
+
+func (r *runner) checkTimeout() error {
+	if r.ignoreTimeout || !r.deadline.reached() {
+		return nil
+	}
+
+	if r.re.Debug() {
+		//Debug.WriteLine("")
+		//Debug.WriteLine("RegEx match timeout occurred!")
+		//Debug.WriteLine("Specified timeout: " + TimeSpan.FromMilliseconds(_timeout).ToString())
+		//Debug.WriteLine("Timeout check frequency: " + TimeoutCheckFrequency)
+		//Debug.WriteLine("Search pattern: " + _runregex._pattern)
+		//Debug.WriteLine("Input: " + r.runtext)
+		//Debug.WriteLine("About to throw RegexMatchTimeoutException.")
+	}
+
+	return fmt.Errorf("match timeout after %v on input `%v`", r.timeout, string(r.runtext))
+}
+
+func (r *runner) initTrackCount() {
+	r.runtrackcount = r.code.TrackCount
+}
+
+// getRunner returns a runner to use for matching re.
+// It uses the re's runner cache if possible, to avoid
+// unnecessary allocation.
+func (re *Regexp) getRunner() *runner {
+	re.muRun.Lock()
+	if n := len(re.runner); n > 0 {
+		z := re.runner[n-1]
+		re.runner = re.runner[:n-1]
+		re.muRun.Unlock()
+		return z
+	}
+	re.muRun.Unlock()
+	z := &runner{
+		re:   re,
+		code: re.code,
+	}
+	return z
+}
+
+// putRunner returns a runner to the re's cache.
+// There is no attempt to limit the size of the cache, so it will
+// grow to the maximum number of simultaneous matches
+// run using re. (The cache empties when re gets garbage collected.)
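+//
+// A minimal sketch of the intended pairing inside this package:
+//
+//	runner := re.getRunner()
+//	defer re.putRunner(runner)
+//	// ... use runner to drive a single match ...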
+func (re *Regexp) putRunner(r *runner) { + re.muRun.Lock() + r.runtext = nil + if r.runmatch != nil { + r.runmatch.text = nil + } + re.runner = append(re.runner, r) + re.muRun.Unlock() +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/charclass.go b/vendor/github.com/dlclark/regexp2/syntax/charclass.go new file mode 100644 index 0000000000..6881a0e297 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/charclass.go @@ -0,0 +1,865 @@ +package syntax + +import ( + "bytes" + "encoding/binary" + "fmt" + "sort" + "unicode" + "unicode/utf8" +) + +// CharSet combines start-end rune ranges and unicode categories representing a set of characters +type CharSet struct { + ranges []singleRange + categories []category + sub *CharSet //optional subtractor + negate bool + anything bool +} + +type category struct { + negate bool + cat string +} + +type singleRange struct { + first rune + last rune +} + +const ( + spaceCategoryText = " " + wordCategoryText = "W" +) + +var ( + ecmaSpace = []rune{0x0009, 0x000e, 0x0020, 0x0021, 0x00a0, 0x00a1, 0x1680, 0x1681, 0x2000, 0x200b, 0x2028, 0x202a, 0x202f, 0x2030, 0x205f, 0x2060, 0x3000, 0x3001, 0xfeff, 0xff00} + ecmaWord = []rune{0x0030, 0x003a, 0x0041, 0x005b, 0x005f, 0x0060, 0x0061, 0x007b} + ecmaDigit = []rune{0x0030, 0x003a} + + re2Space = []rune{0x0009, 0x000b, 0x000c, 0x000e, 0x0020, 0x0021} +) + +var ( + AnyClass = getCharSetFromOldString([]rune{0}, false) + ECMAAnyClass = getCharSetFromOldString([]rune{0, 0x000a, 0x000b, 0x000d, 0x000e}, false) + NoneClass = getCharSetFromOldString(nil, false) + ECMAWordClass = getCharSetFromOldString(ecmaWord, false) + NotECMAWordClass = getCharSetFromOldString(ecmaWord, true) + ECMASpaceClass = getCharSetFromOldString(ecmaSpace, false) + NotECMASpaceClass = getCharSetFromOldString(ecmaSpace, true) + ECMADigitClass = getCharSetFromOldString(ecmaDigit, false) + NotECMADigitClass = getCharSetFromOldString(ecmaDigit, true) + + WordClass = getCharSetFromCategoryString(false, false, wordCategoryText) + NotWordClass = getCharSetFromCategoryString(true, false, wordCategoryText) + SpaceClass = getCharSetFromCategoryString(false, false, spaceCategoryText) + NotSpaceClass = getCharSetFromCategoryString(true, false, spaceCategoryText) + DigitClass = getCharSetFromCategoryString(false, false, "Nd") + NotDigitClass = getCharSetFromCategoryString(false, true, "Nd") + + RE2SpaceClass = getCharSetFromOldString(re2Space, false) + NotRE2SpaceClass = getCharSetFromOldString(re2Space, true) +) + +var unicodeCategories = func() map[string]*unicode.RangeTable { + retVal := make(map[string]*unicode.RangeTable) + for k, v := range unicode.Scripts { + retVal[k] = v + } + for k, v := range unicode.Categories { + retVal[k] = v + } + for k, v := range unicode.Properties { + retVal[k] = v + } + return retVal +}() + +func getCharSetFromCategoryString(negateSet bool, negateCat bool, cats ...string) func() *CharSet { + if negateCat && negateSet { + panic("BUG! 
You should only negate the set OR the category in a constant setup, but not both") + } + + c := CharSet{negate: negateSet} + + c.categories = make([]category, len(cats)) + for i, cat := range cats { + c.categories[i] = category{cat: cat, negate: negateCat} + } + return func() *CharSet { + //make a copy each time + local := c + //return that address + return &local + } +} + +func getCharSetFromOldString(setText []rune, negate bool) func() *CharSet { + c := CharSet{} + if len(setText) > 0 { + fillFirst := false + l := len(setText) + if negate { + if setText[0] == 0 { + setText = setText[1:] + } else { + l++ + fillFirst = true + } + } + + if l%2 == 0 { + c.ranges = make([]singleRange, l/2) + } else { + c.ranges = make([]singleRange, l/2+1) + } + + first := true + if fillFirst { + c.ranges[0] = singleRange{first: 0} + first = false + } + + i := 0 + for _, r := range setText { + if first { + // lower bound in a new range + c.ranges[i] = singleRange{first: r} + first = false + } else { + c.ranges[i].last = r - 1 + i++ + first = true + } + } + if !first { + c.ranges[i].last = utf8.MaxRune + } + } + + return func() *CharSet { + local := c + return &local + } +} + +// Copy makes a deep copy to prevent accidental mutation of a set +func (c CharSet) Copy() CharSet { + ret := CharSet{ + anything: c.anything, + negate: c.negate, + } + + ret.ranges = append(ret.ranges, c.ranges...) + ret.categories = append(ret.categories, c.categories...) + + if c.sub != nil { + sub := c.sub.Copy() + ret.sub = &sub + } + + return ret +} + +// gets a human-readable description for a set string +func (c CharSet) String() string { + buf := &bytes.Buffer{} + buf.WriteRune('[') + + if c.IsNegated() { + buf.WriteRune('^') + } + + for _, r := range c.ranges { + + buf.WriteString(CharDescription(r.first)) + if r.first != r.last { + if r.last-r.first != 1 { + //groups that are 1 char apart skip the dash + buf.WriteRune('-') + } + buf.WriteString(CharDescription(r.last)) + } + } + + for _, c := range c.categories { + buf.WriteString(c.String()) + } + + if c.sub != nil { + buf.WriteRune('-') + buf.WriteString(c.sub.String()) + } + + buf.WriteRune(']') + + return buf.String() +} + +// mapHashFill converts a charset into a buffer for use in maps +func (c CharSet) mapHashFill(buf *bytes.Buffer) { + if c.negate { + buf.WriteByte(0) + } else { + buf.WriteByte(1) + } + + binary.Write(buf, binary.LittleEndian, len(c.ranges)) + binary.Write(buf, binary.LittleEndian, len(c.categories)) + for _, r := range c.ranges { + buf.WriteRune(r.first) + buf.WriteRune(r.last) + } + for _, ct := range c.categories { + buf.WriteString(ct.cat) + if ct.negate { + buf.WriteByte(1) + } else { + buf.WriteByte(0) + } + } + + if c.sub != nil { + c.sub.mapHashFill(buf) + } +} + +// CharIn returns true if the rune is in our character set (either ranges or categories). +// It handles negations and subtracted sub-charsets. 
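+// For example, with a set parsed from `[a-z-[aeiou]]` (character class
+// subtraction), CharIn('b') is true while CharIn('e') is false: both runes
+// pass the range check, but the subtracted sub-set then rejects the vowel.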
+func (c CharSet) CharIn(ch rune) bool { + val := false + // in s && !s.subtracted + + //check ranges + for _, r := range c.ranges { + if ch < r.first { + continue + } + if ch <= r.last { + val = true + break + } + } + + //check categories if we haven't already found a range + if !val && len(c.categories) > 0 { + for _, ct := range c.categories { + // special categories...then unicode + if ct.cat == spaceCategoryText { + if unicode.IsSpace(ch) { + // we found a space so we're done + // negate means this is a "bad" thing + val = !ct.negate + break + } else if ct.negate { + val = true + break + } + } else if ct.cat == wordCategoryText { + if IsWordChar(ch) { + val = !ct.negate + break + } else if ct.negate { + val = true + break + } + } else if unicode.Is(unicodeCategories[ct.cat], ch) { + // if we're in this unicode category then we're done + // if negate=true on this category then we "failed" our test + // otherwise we're good that we found it + val = !ct.negate + break + } else if ct.negate { + val = true + break + } + } + } + + // negate the whole char set + if c.negate { + val = !val + } + + // get subtracted recurse + if val && c.sub != nil { + val = !c.sub.CharIn(ch) + } + + //log.Printf("Char '%v' in %v == %v", string(ch), c.String(), val) + return val +} + +func (c category) String() string { + switch c.cat { + case spaceCategoryText: + if c.negate { + return "\\S" + } + return "\\s" + case wordCategoryText: + if c.negate { + return "\\W" + } + return "\\w" + } + if _, ok := unicodeCategories[c.cat]; ok { + + if c.negate { + return "\\P{" + c.cat + "}" + } + return "\\p{" + c.cat + "}" + } + return "Unknown category: " + c.cat +} + +// CharDescription Produces a human-readable description for a single character. +func CharDescription(ch rune) string { + /*if ch == '\\' { + return "\\\\" + } + + if ch > ' ' && ch <= '~' { + return string(ch) + } else if ch == '\n' { + return "\\n" + } else if ch == ' ' { + return "\\ " + }*/ + + b := &bytes.Buffer{} + escape(b, ch, false) //fmt.Sprintf("%U", ch) + return b.String() +} + +// According to UTS#18 Unicode Regular Expressions (http://www.unicode.org/reports/tr18/) +// RL 1.4 Simple Word Boundaries The class of includes all Alphabetic +// values from the Unicode character database, from UnicodeData.txt [UData], plus the U+200C +// ZERO WIDTH NON-JOINER and U+200D ZERO WIDTH JOINER. +func IsWordChar(r rune) bool { + //"L", "Mn", "Nd", "Pc" + return unicode.In(r, + unicode.Categories["L"], unicode.Categories["Mn"], + unicode.Categories["Nd"], unicode.Categories["Pc"]) || r == '\u200D' || r == '\u200C' + //return 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' || '0' <= r && r <= '9' || r == '_' +} + +func IsECMAWordChar(r rune) bool { + return unicode.In(r, + unicode.Categories["L"], unicode.Categories["Mn"], + unicode.Categories["Nd"], unicode.Categories["Pc"]) + + //return 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' || '0' <= r && r <= '9' || r == '_' +} + +// SingletonChar will return the char from the first range without validation. 
+// It assumes you have checked for IsSingleton or IsSingletonInverse and will panic given bad input
+func (c CharSet) SingletonChar() rune {
+	return c.ranges[0].first
+}
+
+func (c CharSet) IsSingleton() bool {
+	return !c.negate && //negated is multiple chars
+		len(c.categories) == 0 && len(c.ranges) == 1 && // multiple ranges and unicode classes represent multiple chars
+		c.sub == nil && // subtraction means we've got multiple chars
+		c.ranges[0].first == c.ranges[0].last // first and last equal means we're just 1 char
+}
+
+func (c CharSet) IsSingletonInverse() bool {
+	return c.negate && //same as above, but requires negated
+		len(c.categories) == 0 && len(c.ranges) == 1 && // multiple ranges and unicode classes represent multiple chars
+		c.sub == nil && // subtraction means we've got multiple chars
+		c.ranges[0].first == c.ranges[0].last // first and last equal means we're just 1 char
+}
+
+func (c CharSet) IsMergeable() bool {
+	return !c.IsNegated() && !c.HasSubtraction()
+}
+
+func (c CharSet) IsNegated() bool {
+	return c.negate
+}
+
+func (c CharSet) HasSubtraction() bool {
+	return c.sub != nil
+}
+
+func (c CharSet) IsEmpty() bool {
+	return len(c.ranges) == 0 && len(c.categories) == 0 && c.sub == nil
+}
+
+func (c *CharSet) addDigit(ecma, negate bool, pattern string) {
+	if ecma {
+		if negate {
+			c.addRanges(NotECMADigitClass().ranges)
+		} else {
+			c.addRanges(ECMADigitClass().ranges)
+		}
+	} else {
+		c.addCategories(category{cat: "Nd", negate: negate})
+	}
+}
+
+func (c *CharSet) addChar(ch rune) {
+	c.addRange(ch, ch)
+}
+
+func (c *CharSet) addSpace(ecma, re2, negate bool) {
+	if ecma {
+		if negate {
+			c.addRanges(NotECMASpaceClass().ranges)
+		} else {
+			c.addRanges(ECMASpaceClass().ranges)
+		}
+	} else if re2 {
+		if negate {
+			c.addRanges(NotRE2SpaceClass().ranges)
+		} else {
+			c.addRanges(RE2SpaceClass().ranges)
+		}
+	} else {
+		c.addCategories(category{cat: spaceCategoryText, negate: negate})
+	}
+}
+
+func (c *CharSet) addWord(ecma, negate bool) {
+	if ecma {
+		if negate {
+			c.addRanges(NotECMAWordClass().ranges)
+		} else {
+			c.addRanges(ECMAWordClass().ranges)
+		}
+	} else {
+		c.addCategories(category{cat: wordCategoryText, negate: negate})
+	}
+}
+
+// Add set ranges and categories into ours -- no deduping or anything
+func (c *CharSet) addSet(set CharSet) {
+	if c.anything {
+		return
+	}
+	if set.anything {
+		c.makeAnything()
+		return
+	}
+	// just append here to prevent double-canon
+	c.ranges = append(c.ranges, set.ranges...)
+	c.addCategories(set.categories...)
+	c.canonicalize()
+}
+
+func (c *CharSet) makeAnything() {
+	c.anything = true
+	c.categories = []category{}
+	c.ranges = AnyClass().ranges
+}
+
+func (c *CharSet) addCategories(cats ...category) {
+	// don't add dupes and remove positive+negative
+	if c.anything {
+		// if we've had a previous positive+negative group then
+		// just return, we're as broad as we can get
+		return
+	}
+
+	for _, ct := range cats {
+		found := false
+		for _, ct2 := range c.categories {
+			if ct.cat == ct2.cat {
+				if ct.negate != ct2.negate {
+					// opposite negations...this means we just
+					// treat the set as anything and move on
+					c.makeAnything()
+					return
+				}
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			c.categories = append(c.categories, ct)
+		}
+	}
+}
+
+// Merges new ranges to our own
+func (c *CharSet) addRanges(ranges []singleRange) {
+	if c.anything {
+		return
+	}
+	c.ranges = append(c.ranges, ranges...)
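+	// the freshly appended ranges may overlap or abut existing ones;
+	// canonicalize below restores the sorted, merged form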
+ c.canonicalize() +} + +// Merges everything but the new ranges into our own +func (c *CharSet) addNegativeRanges(ranges []singleRange) { + if c.anything { + return + } + + var hi rune + + // convert incoming ranges into opposites, assume they are in order + for _, r := range ranges { + if hi < r.first { + c.ranges = append(c.ranges, singleRange{hi, r.first - 1}) + } + hi = r.last + 1 + } + + if hi < utf8.MaxRune { + c.ranges = append(c.ranges, singleRange{hi, utf8.MaxRune}) + } + + c.canonicalize() +} + +func isValidUnicodeCat(catName string) bool { + _, ok := unicodeCategories[catName] + return ok +} + +func (c *CharSet) addCategory(categoryName string, negate, caseInsensitive bool, pattern string) { + if !isValidUnicodeCat(categoryName) { + // unknown unicode category, script, or property "blah" + panic(fmt.Errorf("Unknown unicode category, script, or property '%v'", categoryName)) + + } + + if caseInsensitive && (categoryName == "Ll" || categoryName == "Lu" || categoryName == "Lt") { + // when RegexOptions.IgnoreCase is specified then {Ll} {Lu} and {Lt} cases should all match + c.addCategories( + category{cat: "Ll", negate: negate}, + category{cat: "Lu", negate: negate}, + category{cat: "Lt", negate: negate}) + } + c.addCategories(category{cat: categoryName, negate: negate}) +} + +func (c *CharSet) addSubtraction(sub *CharSet) { + c.sub = sub +} + +func (c *CharSet) addRange(chMin, chMax rune) { + c.ranges = append(c.ranges, singleRange{first: chMin, last: chMax}) + c.canonicalize() +} + +func (c *CharSet) addNamedASCII(name string, negate bool) bool { + var rs []singleRange + + switch name { + case "alnum": + rs = []singleRange{singleRange{'0', '9'}, singleRange{'A', 'Z'}, singleRange{'a', 'z'}} + case "alpha": + rs = []singleRange{singleRange{'A', 'Z'}, singleRange{'a', 'z'}} + case "ascii": + rs = []singleRange{singleRange{0, 0x7f}} + case "blank": + rs = []singleRange{singleRange{'\t', '\t'}, singleRange{' ', ' '}} + case "cntrl": + rs = []singleRange{singleRange{0, 0x1f}, singleRange{0x7f, 0x7f}} + case "digit": + c.addDigit(false, negate, "") + case "graph": + rs = []singleRange{singleRange{'!', '~'}} + case "lower": + rs = []singleRange{singleRange{'a', 'z'}} + case "print": + rs = []singleRange{singleRange{' ', '~'}} + case "punct": //[!-/:-@[-`{-~] + rs = []singleRange{singleRange{'!', '/'}, singleRange{':', '@'}, singleRange{'[', '`'}, singleRange{'{', '~'}} + case "space": + c.addSpace(true, false, negate) + case "upper": + rs = []singleRange{singleRange{'A', 'Z'}} + case "word": + c.addWord(true, negate) + case "xdigit": + rs = []singleRange{singleRange{'0', '9'}, singleRange{'A', 'F'}, singleRange{'a', 'f'}} + default: + return false + } + + if len(rs) > 0 { + if negate { + c.addNegativeRanges(rs) + } else { + c.addRanges(rs) + } + } + + return true +} + +type singleRangeSorter []singleRange + +func (p singleRangeSorter) Len() int { return len(p) } +func (p singleRangeSorter) Less(i, j int) bool { return p[i].first < p[j].first } +func (p singleRangeSorter) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// Logic to reduce a character class to a unique, sorted form. 
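+// For example, the ranges {[d-f] [a-c] [b-e]} first sort to
+// {[a-c] [b-e] [d-f]} and then collapse into the single range [a-f],
+// since each range overlaps or abuts the next.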
+func (c *CharSet) canonicalize() { + var i, j int + var last rune + + // + // Find and eliminate overlapping or abutting ranges + // + + if len(c.ranges) > 1 { + sort.Sort(singleRangeSorter(c.ranges)) + + done := false + + for i, j = 1, 0; ; i++ { + for last = c.ranges[j].last; ; i++ { + if i == len(c.ranges) || last == utf8.MaxRune { + done = true + break + } + + CurrentRange := c.ranges[i] + if CurrentRange.first > last+1 { + break + } + + if last < CurrentRange.last { + last = CurrentRange.last + } + } + + c.ranges[j] = singleRange{first: c.ranges[j].first, last: last} + + j++ + + if done { + break + } + + if j < i { + c.ranges[j] = c.ranges[i] + } + } + + c.ranges = append(c.ranges[:j], c.ranges[len(c.ranges):]...) + } +} + +// Adds to the class any lowercase versions of characters already +// in the class. Used for case-insensitivity. +func (c *CharSet) addLowercase() { + if c.anything { + return + } + toAdd := []singleRange{} + for i := 0; i < len(c.ranges); i++ { + r := c.ranges[i] + if r.first == r.last { + lower := unicode.ToLower(r.first) + c.ranges[i] = singleRange{first: lower, last: lower} + } else { + toAdd = append(toAdd, r) + } + } + + for _, r := range toAdd { + c.addLowercaseRange(r.first, r.last) + } + c.canonicalize() +} + +/************************************************************************** + Let U be the set of Unicode character values and let L be the lowercase + function, mapping from U to U. To perform case insensitive matching of + character sets, we need to be able to map an interval I in U, say + + I = [chMin, chMax] = { ch : chMin <= ch <= chMax } + + to a set A such that A contains L(I) and A is contained in the union of + I and L(I). + + The table below partitions U into intervals on which L is non-decreasing. + Thus, for any interval J = [a, b] contained in one of these intervals, + L(J) is contained in [L(a), L(b)]. + + It is also true that for any such J, [L(a), L(b)] is contained in the + union of J and L(J). This does not follow from L being non-decreasing on + these intervals. It follows from the nature of the L on each interval. + On each interval, L has one of the following forms: + + (1) L(ch) = constant (LowercaseSet) + (2) L(ch) = ch + offset (LowercaseAdd) + (3) L(ch) = ch | 1 (LowercaseBor) + (4) L(ch) = ch + (ch & 1) (LowercaseBad) + + It is easy to verify that for any of these forms [L(a), L(b)] is + contained in the union of [a, b] and L([a, b]). +***************************************************************************/ + +const ( + LowercaseSet = 0 // Set to arg. + LowercaseAdd = 1 // Add arg. + LowercaseBor = 2 // Bitwise or with 1. + LowercaseBad = 3 // Bitwise and with 1 and add original. 
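+
+	// For instance, the first table row below, {'\u0041', '\u005A', LowercaseAdd, 32},
+	// says L(ch) = ch + 32 on [A-Z], so addLowercaseRange('F', 'K') adds
+	// the range [f-k] to the class.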
+) + +type lcMap struct { + chMin, chMax rune + op, data int32 +} + +var lcTable = []lcMap{ + lcMap{'\u0041', '\u005A', LowercaseAdd, 32}, + lcMap{'\u00C0', '\u00DE', LowercaseAdd, 32}, + lcMap{'\u0100', '\u012E', LowercaseBor, 0}, + lcMap{'\u0130', '\u0130', LowercaseSet, 0x0069}, + lcMap{'\u0132', '\u0136', LowercaseBor, 0}, + lcMap{'\u0139', '\u0147', LowercaseBad, 0}, + lcMap{'\u014A', '\u0176', LowercaseBor, 0}, + lcMap{'\u0178', '\u0178', LowercaseSet, 0x00FF}, + lcMap{'\u0179', '\u017D', LowercaseBad, 0}, + lcMap{'\u0181', '\u0181', LowercaseSet, 0x0253}, + lcMap{'\u0182', '\u0184', LowercaseBor, 0}, + lcMap{'\u0186', '\u0186', LowercaseSet, 0x0254}, + lcMap{'\u0187', '\u0187', LowercaseSet, 0x0188}, + lcMap{'\u0189', '\u018A', LowercaseAdd, 205}, + lcMap{'\u018B', '\u018B', LowercaseSet, 0x018C}, + lcMap{'\u018E', '\u018E', LowercaseSet, 0x01DD}, + lcMap{'\u018F', '\u018F', LowercaseSet, 0x0259}, + lcMap{'\u0190', '\u0190', LowercaseSet, 0x025B}, + lcMap{'\u0191', '\u0191', LowercaseSet, 0x0192}, + lcMap{'\u0193', '\u0193', LowercaseSet, 0x0260}, + lcMap{'\u0194', '\u0194', LowercaseSet, 0x0263}, + lcMap{'\u0196', '\u0196', LowercaseSet, 0x0269}, + lcMap{'\u0197', '\u0197', LowercaseSet, 0x0268}, + lcMap{'\u0198', '\u0198', LowercaseSet, 0x0199}, + lcMap{'\u019C', '\u019C', LowercaseSet, 0x026F}, + lcMap{'\u019D', '\u019D', LowercaseSet, 0x0272}, + lcMap{'\u019F', '\u019F', LowercaseSet, 0x0275}, + lcMap{'\u01A0', '\u01A4', LowercaseBor, 0}, + lcMap{'\u01A7', '\u01A7', LowercaseSet, 0x01A8}, + lcMap{'\u01A9', '\u01A9', LowercaseSet, 0x0283}, + lcMap{'\u01AC', '\u01AC', LowercaseSet, 0x01AD}, + lcMap{'\u01AE', '\u01AE', LowercaseSet, 0x0288}, + lcMap{'\u01AF', '\u01AF', LowercaseSet, 0x01B0}, + lcMap{'\u01B1', '\u01B2', LowercaseAdd, 217}, + lcMap{'\u01B3', '\u01B5', LowercaseBad, 0}, + lcMap{'\u01B7', '\u01B7', LowercaseSet, 0x0292}, + lcMap{'\u01B8', '\u01B8', LowercaseSet, 0x01B9}, + lcMap{'\u01BC', '\u01BC', LowercaseSet, 0x01BD}, + lcMap{'\u01C4', '\u01C5', LowercaseSet, 0x01C6}, + lcMap{'\u01C7', '\u01C8', LowercaseSet, 0x01C9}, + lcMap{'\u01CA', '\u01CB', LowercaseSet, 0x01CC}, + lcMap{'\u01CD', '\u01DB', LowercaseBad, 0}, + lcMap{'\u01DE', '\u01EE', LowercaseBor, 0}, + lcMap{'\u01F1', '\u01F2', LowercaseSet, 0x01F3}, + lcMap{'\u01F4', '\u01F4', LowercaseSet, 0x01F5}, + lcMap{'\u01FA', '\u0216', LowercaseBor, 0}, + lcMap{'\u0386', '\u0386', LowercaseSet, 0x03AC}, + lcMap{'\u0388', '\u038A', LowercaseAdd, 37}, + lcMap{'\u038C', '\u038C', LowercaseSet, 0x03CC}, + lcMap{'\u038E', '\u038F', LowercaseAdd, 63}, + lcMap{'\u0391', '\u03AB', LowercaseAdd, 32}, + lcMap{'\u03E2', '\u03EE', LowercaseBor, 0}, + lcMap{'\u0401', '\u040F', LowercaseAdd, 80}, + lcMap{'\u0410', '\u042F', LowercaseAdd, 32}, + lcMap{'\u0460', '\u0480', LowercaseBor, 0}, + lcMap{'\u0490', '\u04BE', LowercaseBor, 0}, + lcMap{'\u04C1', '\u04C3', LowercaseBad, 0}, + lcMap{'\u04C7', '\u04C7', LowercaseSet, 0x04C8}, + lcMap{'\u04CB', '\u04CB', LowercaseSet, 0x04CC}, + lcMap{'\u04D0', '\u04EA', LowercaseBor, 0}, + lcMap{'\u04EE', '\u04F4', LowercaseBor, 0}, + lcMap{'\u04F8', '\u04F8', LowercaseSet, 0x04F9}, + lcMap{'\u0531', '\u0556', LowercaseAdd, 48}, + lcMap{'\u10A0', '\u10C5', LowercaseAdd, 48}, + lcMap{'\u1E00', '\u1EF8', LowercaseBor, 0}, + lcMap{'\u1F08', '\u1F0F', LowercaseAdd, -8}, + lcMap{'\u1F18', '\u1F1F', LowercaseAdd, -8}, + lcMap{'\u1F28', '\u1F2F', LowercaseAdd, -8}, + lcMap{'\u1F38', '\u1F3F', LowercaseAdd, -8}, + lcMap{'\u1F48', '\u1F4D', LowercaseAdd, -8}, + lcMap{'\u1F59', '\u1F59', LowercaseSet, 
0x1F51}, + lcMap{'\u1F5B', '\u1F5B', LowercaseSet, 0x1F53}, + lcMap{'\u1F5D', '\u1F5D', LowercaseSet, 0x1F55}, + lcMap{'\u1F5F', '\u1F5F', LowercaseSet, 0x1F57}, + lcMap{'\u1F68', '\u1F6F', LowercaseAdd, -8}, + lcMap{'\u1F88', '\u1F8F', LowercaseAdd, -8}, + lcMap{'\u1F98', '\u1F9F', LowercaseAdd, -8}, + lcMap{'\u1FA8', '\u1FAF', LowercaseAdd, -8}, + lcMap{'\u1FB8', '\u1FB9', LowercaseAdd, -8}, + lcMap{'\u1FBA', '\u1FBB', LowercaseAdd, -74}, + lcMap{'\u1FBC', '\u1FBC', LowercaseSet, 0x1FB3}, + lcMap{'\u1FC8', '\u1FCB', LowercaseAdd, -86}, + lcMap{'\u1FCC', '\u1FCC', LowercaseSet, 0x1FC3}, + lcMap{'\u1FD8', '\u1FD9', LowercaseAdd, -8}, + lcMap{'\u1FDA', '\u1FDB', LowercaseAdd, -100}, + lcMap{'\u1FE8', '\u1FE9', LowercaseAdd, -8}, + lcMap{'\u1FEA', '\u1FEB', LowercaseAdd, -112}, + lcMap{'\u1FEC', '\u1FEC', LowercaseSet, 0x1FE5}, + lcMap{'\u1FF8', '\u1FF9', LowercaseAdd, -128}, + lcMap{'\u1FFA', '\u1FFB', LowercaseAdd, -126}, + lcMap{'\u1FFC', '\u1FFC', LowercaseSet, 0x1FF3}, + lcMap{'\u2160', '\u216F', LowercaseAdd, 16}, + lcMap{'\u24B6', '\u24D0', LowercaseAdd, 26}, + lcMap{'\uFF21', '\uFF3A', LowercaseAdd, 32}, +} + +func (c *CharSet) addLowercaseRange(chMin, chMax rune) { + var i, iMax, iMid int + var chMinT, chMaxT rune + var lc lcMap + + for i, iMax = 0, len(lcTable); i < iMax; { + iMid = (i + iMax) / 2 + if lcTable[iMid].chMax < chMin { + i = iMid + 1 + } else { + iMax = iMid + } + } + + for ; i < len(lcTable); i++ { + lc = lcTable[i] + if lc.chMin > chMax { + return + } + chMinT = lc.chMin + if chMinT < chMin { + chMinT = chMin + } + + chMaxT = lc.chMax + if chMaxT > chMax { + chMaxT = chMax + } + + switch lc.op { + case LowercaseSet: + chMinT = rune(lc.data) + chMaxT = rune(lc.data) + break + case LowercaseAdd: + chMinT += lc.data + chMaxT += lc.data + break + case LowercaseBor: + chMinT |= 1 + chMaxT |= 1 + break + case LowercaseBad: + chMinT += (chMinT & 1) + chMaxT += (chMaxT & 1) + break + } + + if chMinT < chMin || chMaxT > chMax { + c.addRange(chMinT, chMaxT) + } + } +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/code.go b/vendor/github.com/dlclark/regexp2/syntax/code.go new file mode 100644 index 0000000000..686e822af8 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/code.go @@ -0,0 +1,274 @@ +package syntax + +import ( + "bytes" + "fmt" + "math" +) + +// similar to prog.go in the go regex package...also with comment 'may not belong in this package' + +// File provides operator constants for use by the Builder and the Machine. + +// Implementation notes: +// +// Regexps are built into RegexCodes, which contain an operation array, +// a string table, and some constants. +// +// Each operation is one of the codes below, followed by the integer +// operands specified for each op. +// +// Strings and sets are indices into a string table. + +type InstOp int + +const ( + // lef/back operands description + + Onerep InstOp = 0 // lef,back char,min,max a {n} + Notonerep = 1 // lef,back char,min,max .{n} + Setrep = 2 // lef,back set,min,max [\d]{n} + + Oneloop = 3 // lef,back char,min,max a {,n} + Notoneloop = 4 // lef,back char,min,max .{,n} + Setloop = 5 // lef,back set,min,max [\d]{,n} + + Onelazy = 6 // lef,back char,min,max a {,n}? + Notonelazy = 7 // lef,back char,min,max .{,n}? + Setlazy = 8 // lef,back set,min,max [\d]{,n}? 
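+
+	// Each rep/loop/lazy form above carries two operands -- the char (or set
+	// index) and the count -- so opcodeSize below reports 3 for them; e.g.
+	// `a{3}` would compile to an Onerep with operands ('a', 3).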
+
+	One    = 9  // lef      char            a
+	Notone = 10 // lef      char            [^a]
+	Set    = 11 // lef      set             [a-z\s]  \w \s \d
+
+	Multi = 12 // lef      string          abcd
+	Ref   = 13 // lef      group           \#
+
+	Bol         = 14 //                          ^
+	Eol         = 15 //                          $
+	Boundary    = 16 //                          \b
+	Nonboundary = 17 //                          \B
+	Beginning   = 18 //                          \A
+	Start       = 19 //                          \G
+	EndZ        = 20 //                          \Z
+	End         = 21 //                          \z
+
+	Nothing = 22 //                          Reject!
+
+	// Primitive control structures
+
+	Lazybranch      = 23 // back     jump            straight first
+	Branchmark      = 24 // back     jump            branch first for loop
+	Lazybranchmark  = 25 // back     jump            straight first for loop
+	Nullcount       = 26 // back     val             set counter, null mark
+	Setcount        = 27 // back     val             set counter, make mark
+	Branchcount     = 28 // back     jump,limit      branch++ if zero<=c<limit
+	Lazybranchcount = 29 // back     jump,limit      same, but straight first
+	Nullmark        = 30 // back                     save position
+	Setmark         = 31 // back                     save position
+	Capturemark     = 32 // back     group           define group
+	Getmark         = 33 // back                     recall position
+	Setjump         = 34 // back                     save backtrack state
+	Backjump        = 35 //                          zap back to saved state
+	Forejump        = 36 //                          zap backtracking state
+	Testref         = 37 //                          backtrack if ref undefined
+	Goto            = 38 //          jump            just go
+
+	Prune = 39 //                          prune it baby
+	Stop  = 40 //                          done!
+
+	ECMABoundary    = 41 //                          \b
+	NonECMABoundary = 42 //                          \B
+
+	// Modifiers for alternate modes
+
+	Mask  = 63  // Mask to get unmodified ordinary operator
+	Rtl   = 64  // bit to indicate that we're reverse scanning
+	Back  = 128 // bit to indicate that we're backtracking
+	Back2 = 256 // bit to indicate that we're backtracking on a second branch
+	Ci    = 512 // bit to indicate that we're case-insensitive
+)
+
+type Code struct {
+	Codes      []int       // the code
+	Strings    [][]rune    // the string table
+	Sets       []*CharSet  // the set table
+	TrackCount int         // how many instructions use backtracking
+	Caps       map[int]int // mapping of user group numbers -> impl group slots
+	Capsize    int         // number of impl group slots
+	FcPrefix   *Prefix     // the set of candidate first characters (may be null)
+	BmPrefix   *BmPrefix   // the fixed prefix string as a Boyer-Moore machine (may be null)
+	Anchors    AnchorLoc   // the set of zero-length start anchors (RegexFCD.Bol, etc)
+
+	RightToLeft bool // true if right to left
+}
+
+func opcodeBacktracks(op InstOp) bool {
+	op &= Mask
+
+	switch op {
+	case Oneloop, Notoneloop, Setloop, Onelazy, Notonelazy, Setlazy, Lazybranch, Branchmark, Lazybranchmark,
+		Nullcount, Setcount, Branchcount, Lazybranchcount, Setmark, Capturemark, Getmark, Setjump, Backjump,
+		Forejump, Goto:
+		return true
+
+	default:
+		return false
+	}
+}
+
+func opcodeSize(op InstOp) int {
+	op &= Mask
+
+	switch op {
+	case Nothing, Bol, Eol, Boundary, Nonboundary, ECMABoundary, NonECMABoundary, Beginning, Start, EndZ,
+		End, Nullmark, Setmark, Getmark, Setjump, Backjump, Forejump, Stop:
+		return 1
+
+	case One, Notone, Multi, Ref, Testref, Goto, Nullcount, Setcount, Lazybranch, Branchmark, Lazybranchmark,
+		Prune, Set:
+		return 2
+
+	case Capturemark, Branchcount, Lazybranchcount, Onerep, Notonerep, Oneloop, Notoneloop, Onelazy, Notonelazy,
+		Setlazy, Setrep, Setloop:
+		return 3
+
+	default:
+		panic(fmt.Errorf("Unexpected op code: %v", op))
+	}
+}
+
+var codeStr = []string{
+	"Onerep", "Notonerep", "Setrep",
+	"Oneloop", "Notoneloop", "Setloop",
+	"Onelazy", "Notonelazy", "Setlazy",
+	"One", "Notone", "Set",
+	"Multi", "Ref",
+	"Bol", "Eol", "Boundary", "Nonboundary", "Beginning", "Start", "EndZ", "End",
+	"Nothing",
+	"Lazybranch", "Branchmark", "Lazybranchmark",
+	"Nullcount", "Setcount", "Branchcount", "Lazybranchcount",
+	"Nullmark", "Setmark", "Capturemark", "Getmark",
+	"Setjump", "Backjump", "Forejump", "Testref", "Goto",
+	"Prune", "Stop",
+	"ECMABoundary", "NonECMABoundary",
+}
+
+func operatorDescription(op InstOp) string {
+	desc := codeStr[op&Mask]
+	if (op & Ci) != 0 {
+		desc += "-Ci"
+	}
+	if (op & Rtl) != 0 {
+		desc += "-Rtl"
+	}
+	if (op & Back) != 0 {
+		desc += "-Back"
+	}
+	if (op & Back2) != 0 {
+		desc += "-Back2"
+	}
+
+	return desc
+}
+
+// OpcodeDescription is a human-readable description of the opcode at the specific offset
+func (c *Code) OpcodeDescription(offset int) string {
+	buf := &bytes.Buffer{}
+
+	op := InstOp(c.Codes[offset])
+	fmt.Fprintf(buf, "%06d ", offset)
+
+	if opcodeBacktracks(op & Mask) {
+		buf.WriteString("*")
+	} else {
+		buf.WriteString(" ")
+	}
+	buf.WriteString(operatorDescription(op))
+	buf.WriteString("(")
+	op &= Mask
+
+	switch op {
+	case One, Notone, Onerep, Notonerep, Oneloop, Notoneloop, Onelazy, Notonelazy:
+		buf.WriteString("Ch = ")
+		buf.WriteString(CharDescription(rune(c.Codes[offset+1])))
+
+	case Set, Setrep, Setloop, Setlazy:
+		buf.WriteString("Set = ")
+		buf.WriteString(c.Sets[c.Codes[offset+1]].String())
+
+	case Multi:
+
fmt.Fprintf(buf, "String = %s", string(c.Strings[c.Codes[offset+1]])) + + case Ref, Testref: + fmt.Fprintf(buf, "Index = %d", c.Codes[offset+1]) + + case Capturemark: + fmt.Fprintf(buf, "Index = %d", c.Codes[offset+1]) + if c.Codes[offset+2] != -1 { + fmt.Fprintf(buf, ", Unindex = %d", c.Codes[offset+2]) + } + + case Nullcount, Setcount: + fmt.Fprintf(buf, "Value = %d", c.Codes[offset+1]) + + case Goto, Lazybranch, Branchmark, Lazybranchmark, Branchcount, Lazybranchcount: + fmt.Fprintf(buf, "Addr = %d", c.Codes[offset+1]) + } + + switch op { + case Onerep, Notonerep, Oneloop, Notoneloop, Onelazy, Notonelazy, Setrep, Setloop, Setlazy: + buf.WriteString(", Rep = ") + if c.Codes[offset+2] == math.MaxInt32 { + buf.WriteString("inf") + } else { + fmt.Fprintf(buf, "%d", c.Codes[offset+2]) + } + + case Branchcount, Lazybranchcount: + buf.WriteString(", Limit = ") + if c.Codes[offset+2] == math.MaxInt32 { + buf.WriteString("inf") + } else { + fmt.Fprintf(buf, "%d", c.Codes[offset+2]) + } + + } + + buf.WriteString(")") + + return buf.String() +} + +func (c *Code) Dump() string { + buf := &bytes.Buffer{} + + if c.RightToLeft { + fmt.Fprintln(buf, "Direction: right-to-left") + } else { + fmt.Fprintln(buf, "Direction: left-to-right") + } + if c.FcPrefix == nil { + fmt.Fprintln(buf, "Firstchars: n/a") + } else { + fmt.Fprintf(buf, "Firstchars: %v\n", c.FcPrefix.PrefixSet.String()) + } + + if c.BmPrefix == nil { + fmt.Fprintln(buf, "Prefix: n/a") + } else { + fmt.Fprintf(buf, "Prefix: %v\n", Escape(c.BmPrefix.String())) + } + + fmt.Fprintf(buf, "Anchors: %v\n", c.Anchors) + fmt.Fprintln(buf) + + if c.BmPrefix != nil { + fmt.Fprintln(buf, "BoyerMoore:") + fmt.Fprintln(buf, c.BmPrefix.Dump(" ")) + } + for i := 0; i < len(c.Codes); i += opcodeSize(InstOp(c.Codes[i])) { + fmt.Fprintln(buf, c.OpcodeDescription(i)) + } + + return buf.String() +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/escape.go b/vendor/github.com/dlclark/regexp2/syntax/escape.go new file mode 100644 index 0000000000..609df10731 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/escape.go @@ -0,0 +1,94 @@ +package syntax + +import ( + "bytes" + "strconv" + "strings" + "unicode" +) + +func Escape(input string) string { + b := &bytes.Buffer{} + for _, r := range input { + escape(b, r, false) + } + return b.String() +} + +const meta = `\.+*?()|[]{}^$# ` + +func escape(b *bytes.Buffer, r rune, force bool) { + if unicode.IsPrint(r) { + if strings.IndexRune(meta, r) >= 0 || force { + b.WriteRune('\\') + } + b.WriteRune(r) + return + } + + switch r { + case '\a': + b.WriteString(`\a`) + case '\f': + b.WriteString(`\f`) + case '\n': + b.WriteString(`\n`) + case '\r': + b.WriteString(`\r`) + case '\t': + b.WriteString(`\t`) + case '\v': + b.WriteString(`\v`) + default: + if r < 0x100 { + b.WriteString(`\x`) + s := strconv.FormatInt(int64(r), 16) + if len(s) == 1 { + b.WriteRune('0') + } + b.WriteString(s) + break + } + b.WriteString(`\u`) + b.WriteString(strconv.FormatInt(int64(r), 16)) + } +} + +func Unescape(input string) (string, error) { + idx := strings.IndexRune(input, '\\') + // no slashes means no unescape needed + if idx == -1 { + return input, nil + } + + buf := bytes.NewBufferString(input[:idx]) + // get the runes for the rest of the string -- we're going full parser scan on this + + p := parser{} + p.setPattern(input[idx+1:]) + for { + if p.rightMost() { + return "", p.getErr(ErrIllegalEndEscape) + } + r, err := p.scanCharEscape() + if err != nil { + return "", err 
+ } + buf.WriteRune(r) + // are we done? + if p.rightMost() { + return buf.String(), nil + } + + r = p.moveRightGetChar() + for r != '\\' { + buf.WriteRune(r) + if p.rightMost() { + // we're done, no more slashes + return buf.String(), nil + } + // keep scanning until we get another slash + r = p.moveRightGetChar() + } + } +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/fuzz.go b/vendor/github.com/dlclark/regexp2/syntax/fuzz.go new file mode 100644 index 0000000000..ee863866db --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/fuzz.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package syntax + +// Fuzz is the input point for go-fuzz +func Fuzz(data []byte) int { + sdata := string(data) + tree, err := Parse(sdata, RegexOptions(0)) + if err != nil { + return 0 + } + + // translate it to code + _, err = Write(tree) + if err != nil { + panic(err) + } + + return 1 +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/parser.go b/vendor/github.com/dlclark/regexp2/syntax/parser.go new file mode 100644 index 0000000000..b6c3670c03 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/parser.go @@ -0,0 +1,2262 @@ +package syntax + +import ( + "fmt" + "math" + "os" + "sort" + "strconv" + "unicode" +) + +type RegexOptions int32 + +const ( + IgnoreCase RegexOptions = 0x0001 // "i" + Multiline = 0x0002 // "m" + ExplicitCapture = 0x0004 // "n" + Compiled = 0x0008 // "c" + Singleline = 0x0010 // "s" + IgnorePatternWhitespace = 0x0020 // "x" + RightToLeft = 0x0040 // "r" + Debug = 0x0080 // "d" + ECMAScript = 0x0100 // "e" + RE2 = 0x0200 // RE2 compat mode + Unicode = 0x0400 // "u" +) + +func optionFromCode(ch rune) RegexOptions { + // case-insensitive + switch ch { + case 'i', 'I': + return IgnoreCase + case 'r', 'R': + return RightToLeft + case 'm', 'M': + return Multiline + case 'n', 'N': + return ExplicitCapture + case 's', 'S': + return Singleline + case 'x', 'X': + return IgnorePatternWhitespace + case 'd', 'D': + return Debug + case 'e', 'E': + return ECMAScript + case 'u', 'U': + return Unicode + default: + return 0 + } +} + +// An Error describes a failure to parse a regular expression +// and gives the offending expression. +type Error struct { + Code ErrorCode + Expr string + Args []interface{} +} + +func (e *Error) Error() string { + if len(e.Args) == 0 { + return "error parsing regexp: " + e.Code.String() + " in `" + e.Expr + "`" + } + return "error parsing regexp: " + fmt.Sprintf(e.Code.String(), e.Args...) + " in `" + e.Expr + "`" +} + +// An ErrorCode describes a failure to parse a regular expression. 
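+//
+// As a rough sketch, a malformed pattern surfaces one of these codes
+// through Error:
+//
+//	_, err := Parse("(ab", RegexOptions(0))
+//	// err.Error() == "error parsing regexp: missing closing ) in `(ab`"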
+type ErrorCode string + +const ( + // internal issue + ErrInternalError ErrorCode = "regexp/syntax: internal error" + // Parser errors + ErrUnterminatedComment = "unterminated comment" + ErrInvalidCharRange = "invalid character class range" + ErrInvalidRepeatSize = "invalid repeat count" + ErrInvalidUTF8 = "invalid UTF-8" + ErrCaptureGroupOutOfRange = "capture group number out of range" + ErrUnexpectedParen = "unexpected )" + ErrMissingParen = "missing closing )" + ErrMissingBrace = "missing closing }" + ErrInvalidRepeatOp = "invalid nested repetition operator" + ErrMissingRepeatArgument = "missing argument to repetition operator" + ErrConditionalExpression = "illegal conditional (?(...)) expression" + ErrTooManyAlternates = "too many | in (?()|)" + ErrUnrecognizedGrouping = "unrecognized grouping construct: (%v" + ErrInvalidGroupName = "invalid group name: group names must begin with a word character and have a matching terminator" + ErrCapNumNotZero = "capture number cannot be zero" + ErrUndefinedBackRef = "reference to undefined group number %v" + ErrUndefinedNameRef = "reference to undefined group name %v" + ErrAlternationCantCapture = "alternation conditions do not capture and cannot be named" + ErrAlternationCantHaveComment = "alternation conditions cannot be comments" + ErrMalformedReference = "(?(%v) ) malformed" + ErrUndefinedReference = "(?(%v) ) reference to undefined group" + ErrIllegalEndEscape = "illegal \\ at end of pattern" + ErrMalformedSlashP = "malformed \\p{X} character escape" + ErrIncompleteSlashP = "incomplete \\p{X} character escape" + ErrUnknownSlashP = "unknown unicode category, script, or property '%v'" + ErrUnrecognizedEscape = "unrecognized escape sequence \\%v" + ErrMissingControl = "missing control character" + ErrUnrecognizedControl = "unrecognized control character" + ErrTooFewHex = "insufficient hexadecimal digits" + ErrInvalidHex = "hex values may not be larger than 0x10FFFF" + ErrMalformedNameRef = "malformed \\k<...> named back reference" + ErrBadClassInCharRange = "cannot include class \\%v in character range" + ErrUnterminatedBracket = "unterminated [] set" + ErrSubtractionMustBeLast = "a subtraction must be the last element in a character class" + ErrReversedCharRange = "[%c-%c] range in reverse order" +) + +func (e ErrorCode) String() string { + return string(e) +} + +type parser struct { + stack *regexNode + group *regexNode + alternation *regexNode + concatenation *regexNode + unit *regexNode + + patternRaw string + pattern []rune + + currentPos int + specialCase *unicode.SpecialCase + + autocap int + capcount int + captop int + capsize int + + caps map[int]int + capnames map[string]int + + capnumlist []int + capnamelist []string + + options RegexOptions + optionsStack []RegexOptions + ignoreNextParen bool +} + +const ( + maxValueDiv10 int = math.MaxInt32 / 10 + maxValueMod10 = math.MaxInt32 % 10 +) + +// Parse converts a regex string into a parse tree +func Parse(re string, op RegexOptions) (*RegexTree, error) { + p := parser{ + options: op, + caps: make(map[int]int), + } + p.setPattern(re) + + if err := p.countCaptures(); err != nil { + return nil, err + } + + p.reset(op) + root, err := p.scanRegex() + + if err != nil { + return nil, err + } + tree := &RegexTree{ + root: root, + caps: p.caps, + capnumlist: p.capnumlist, + captop: p.captop, + Capnames: p.capnames, + Caplist: p.capnamelist, + options: op, + } + + if tree.options&Debug > 0 { + os.Stdout.WriteString(tree.Dump()) + } + + return tree, nil +} + +func (p *parser) setPattern(pattern 
string) { + p.patternRaw = pattern + p.pattern = make([]rune, 0, len(pattern)) + + //populate our rune array to handle utf8 encoding + for _, r := range pattern { + p.pattern = append(p.pattern, r) + } +} +func (p *parser) getErr(code ErrorCode, args ...interface{}) error { + return &Error{Code: code, Expr: p.patternRaw, Args: args} +} + +func (p *parser) noteCaptureSlot(i, pos int) { + if _, ok := p.caps[i]; !ok { + // the rhs of the hashtable isn't used in the parser + p.caps[i] = pos + p.capcount++ + + if p.captop <= i { + if i == math.MaxInt32 { + p.captop = i + } else { + p.captop = i + 1 + } + } + } +} + +func (p *parser) noteCaptureName(name string, pos int) { + if p.capnames == nil { + p.capnames = make(map[string]int) + } + + if _, ok := p.capnames[name]; !ok { + p.capnames[name] = pos + p.capnamelist = append(p.capnamelist, name) + } +} + +func (p *parser) assignNameSlots() { + if p.capnames != nil { + for _, name := range p.capnamelist { + for p.isCaptureSlot(p.autocap) { + p.autocap++ + } + pos := p.capnames[name] + p.capnames[name] = p.autocap + p.noteCaptureSlot(p.autocap, pos) + + p.autocap++ + } + } + + // if the caps array has at least one gap, construct the list of used slots + if p.capcount < p.captop { + p.capnumlist = make([]int, p.capcount) + i := 0 + + for k := range p.caps { + p.capnumlist[i] = k + i++ + } + + sort.Ints(p.capnumlist) + } + + // merge capsnumlist into capnamelist + if p.capnames != nil || p.capnumlist != nil { + var oldcapnamelist []string + var next int + var k int + + if p.capnames == nil { + oldcapnamelist = nil + p.capnames = make(map[string]int) + p.capnamelist = []string{} + next = -1 + } else { + oldcapnamelist = p.capnamelist + p.capnamelist = []string{} + next = p.capnames[oldcapnamelist[0]] + } + + for i := 0; i < p.capcount; i++ { + j := i + if p.capnumlist != nil { + j = p.capnumlist[i] + } + + if next == j { + p.capnamelist = append(p.capnamelist, oldcapnamelist[k]) + k++ + + if k == len(oldcapnamelist) { + next = -1 + } else { + next = p.capnames[oldcapnamelist[k]] + } + + } else { + //feature: culture? + str := strconv.Itoa(j) + p.capnamelist = append(p.capnamelist, str) + p.capnames[str] = j + } + } + } +} + +func (p *parser) consumeAutocap() int { + r := p.autocap + p.autocap++ + return r +} + +// CountCaptures is a prescanner for deducing the slots used for +// captures by doing a partial tokenization of the pattern. +func (p *parser) countCaptures() error { + var ch rune + + p.noteCaptureSlot(0, 0) + + p.autocap = 1 + + for p.charsRight() > 0 { + pos := p.textpos() + ch = p.moveRightGetChar() + switch ch { + case '\\': + if p.charsRight() > 0 { + p.scanBackslash(true) + } + + case '#': + if p.useOptionX() { + p.moveLeft() + p.scanBlank() + } + + case '[': + p.scanCharSet(false, true) + + case ')': + if !p.emptyOptionsStack() { + p.popOptions() + } + + case '(': + if p.charsRight() >= 2 && p.rightChar(1) == '#' && p.rightChar(0) == '?' { + p.moveLeft() + p.scanBlank() + } else { + p.pushOptions() + if p.charsRight() > 0 && p.rightChar(0) == '?' { + // we have (?... + p.moveRight(1) + + if p.charsRight() > 1 && (p.rightChar(0) == '<' || p.rightChar(0) == '\'') { + // named group: (?<... or (?'... 
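+ // (a digit 1-9 after the opener means an explicitly numbered group such as
+ // (?<3>...), noted as a capture slot; a word character starts a group name)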
+ + p.moveRight(1) + ch = p.rightChar(0) + + if ch != '0' && IsWordChar(ch) { + if ch >= '1' && ch <= '9' { + dec, err := p.scanDecimal() + if err != nil { + return err + } + p.noteCaptureSlot(dec, pos) + } else { + p.noteCaptureName(p.scanCapname(), pos) + } + } + } else if p.useRE2() && p.charsRight() > 2 && (p.rightChar(0) == 'P' && p.rightChar(1) == '<') { + // RE2-compat (?P<) + p.moveRight(2) + ch = p.rightChar(0) + if IsWordChar(ch) { + p.noteCaptureName(p.scanCapname(), pos) + } + + } else { + // (?... + + // get the options if it's an option construct (?cimsx-cimsx...) + p.scanOptions() + + if p.charsRight() > 0 { + if p.rightChar(0) == ')' { + // (?cimsx-cimsx) + p.moveRight(1) + p.popKeepOptions() + } else if p.rightChar(0) == '(' { + // alternation construct: (?(foo)yes|no) + // ignore the next paren so we don't capture the condition + p.ignoreNextParen = true + + // break from here so we don't reset ignoreNextParen + continue + } + } + } + } else { + if !p.useOptionN() && !p.ignoreNextParen { + p.noteCaptureSlot(p.consumeAutocap(), pos) + } + } + } + + p.ignoreNextParen = false + + } + } + + p.assignNameSlots() + return nil +} + +func (p *parser) reset(topopts RegexOptions) { + p.currentPos = 0 + p.autocap = 1 + p.ignoreNextParen = false + + if len(p.optionsStack) > 0 { + p.optionsStack = p.optionsStack[:0] + } + + p.options = topopts + p.stack = nil +} + +func (p *parser) scanRegex() (*regexNode, error) { + ch := '@' // nonspecial ch, means at beginning + isQuant := false + + p.startGroup(newRegexNodeMN(ntCapture, p.options, 0, -1)) + + for p.charsRight() > 0 { + wasPrevQuantifier := isQuant + isQuant = false + + if err := p.scanBlank(); err != nil { + return nil, err + } + + startpos := p.textpos() + + // move past all of the normal characters. We'll stop when we hit some kind of control character, + // or if IgnorePatternWhiteSpace is on, we'll stop when we see some whitespace. + if p.useOptionX() { + for p.charsRight() > 0 { + ch = p.rightChar(0) + //UGLY: clean up, this is ugly + if !(!isStopperX(ch) || (ch == '{' && !p.isTrueQuantifier())) { + break + } + p.moveRight(1) + } + } else { + for p.charsRight() > 0 { + ch = p.rightChar(0) + if !(!isSpecial(ch) || ch == '{' && !p.isTrueQuantifier()) { + break + } + p.moveRight(1) + } + } + + endpos := p.textpos() + + p.scanBlank() + + if p.charsRight() == 0 { + ch = '!' 
// nonspecial, means at end + } else if ch = p.rightChar(0); isSpecial(ch) { + isQuant = isQuantifier(ch) + p.moveRight(1) + } else { + ch = ' ' // nonspecial, means at ordinary char + } + + if startpos < endpos { + cchUnquantified := endpos - startpos + if isQuant { + cchUnquantified-- + } + wasPrevQuantifier = false + + if cchUnquantified > 0 { + p.addToConcatenate(startpos, cchUnquantified, false) + } + + if isQuant { + p.addUnitOne(p.charAt(endpos - 1)) + } + } + + switch ch { + case '!': + goto BreakOuterScan + + case ' ': + goto ContinueOuterScan + + case '[': + cc, err := p.scanCharSet(p.useOptionI(), false) + if err != nil { + return nil, err + } + p.addUnitSet(cc) + + case '(': + p.pushOptions() + + if grouper, err := p.scanGroupOpen(); err != nil { + return nil, err + } else if grouper == nil { + p.popKeepOptions() + } else { + p.pushGroup() + p.startGroup(grouper) + } + + continue + + case '|': + p.addAlternate() + goto ContinueOuterScan + + case ')': + if p.emptyStack() { + return nil, p.getErr(ErrUnexpectedParen) + } + + if err := p.addGroup(); err != nil { + return nil, err + } + if err := p.popGroup(); err != nil { + return nil, err + } + p.popOptions() + + if p.unit == nil { + goto ContinueOuterScan + } + + case '\\': + n, err := p.scanBackslash(false) + if err != nil { + return nil, err + } + p.addUnitNode(n) + + case '^': + if p.useOptionM() { + p.addUnitType(ntBol) + } else { + p.addUnitType(ntBeginning) + } + + case '$': + if p.useOptionM() { + p.addUnitType(ntEol) + } else { + p.addUnitType(ntEndZ) + } + + case '.': + if p.useOptionE() { + p.addUnitSet(ECMAAnyClass()) + } else if p.useOptionS() { + p.addUnitSet(AnyClass()) + } else { + p.addUnitNotone('\n') + } + + case '{', '*', '+', '?': + if p.unit == nil { + if wasPrevQuantifier { + return nil, p.getErr(ErrInvalidRepeatOp) + } else { + return nil, p.getErr(ErrMissingRepeatArgument) + } + } + p.moveLeft() + + default: + return nil, p.getErr(ErrInternalError) + } + + if err := p.scanBlank(); err != nil { + return nil, err + } + + if p.charsRight() > 0 { + isQuant = p.isTrueQuantifier() + } + if p.charsRight() == 0 || !isQuant { + //maintain odd C# assignment order -- not sure if required, could clean up? + p.addConcatenate() + goto ContinueOuterScan + } + + ch = p.moveRightGetChar() + + // Handle quantifiers + for p.unit != nil { + var min, max int + var lazy bool + + switch ch { + case '*': + min = 0 + max = math.MaxInt32 + + case '?': + min = 0 + max = 1 + + case '+': + min = 1 + max = math.MaxInt32 + + case '{': + { + var err error + startpos = p.textpos() + if min, err = p.scanDecimal(); err != nil { + return nil, err + } + max = min + if startpos < p.textpos() { + if p.charsRight() > 0 && p.rightChar(0) == ',' { + p.moveRight(1) + if p.charsRight() == 0 || p.rightChar(0) == '}' { + max = math.MaxInt32 + } else { + if max, err = p.scanDecimal(); err != nil { + return nil, err + } + } + } + } + + if startpos == p.textpos() || p.charsRight() == 0 || p.moveRightGetChar() != '}' { + p.addConcatenate() + p.textto(startpos - 1) + goto ContinueOuterScan + } + } + + default: + return nil, p.getErr(ErrInternalError) + } + + if err := p.scanBlank(); err != nil { + return nil, err + } + + if p.charsRight() == 0 || p.rightChar(0) != '?' 
{ + lazy = false + } else { + p.moveRight(1) + lazy = true + } + + if min > max { + return nil, p.getErr(ErrInvalidRepeatSize) + } + + p.addConcatenate3(lazy, min, max) + } + + ContinueOuterScan: + } + +BreakOuterScan: + ; + + if !p.emptyStack() { + return nil, p.getErr(ErrMissingParen) + } + + if err := p.addGroup(); err != nil { + return nil, err + } + + return p.unit, nil + +} + +/* + * Simple parsing for replacement patterns + */ +func (p *parser) scanReplacement() (*regexNode, error) { + var c, startpos int + + p.concatenation = newRegexNode(ntConcatenate, p.options) + + for { + c = p.charsRight() + if c == 0 { + break + } + + startpos = p.textpos() + + for c > 0 && p.rightChar(0) != '$' { + p.moveRight(1) + c-- + } + + p.addToConcatenate(startpos, p.textpos()-startpos, true) + + if c > 0 { + if p.moveRightGetChar() == '$' { + n, err := p.scanDollar() + if err != nil { + return nil, err + } + p.addUnitNode(n) + } + p.addConcatenate() + } + } + + return p.concatenation, nil +} + +/* + * Scans $ patterns recognized within replacement patterns + */ +func (p *parser) scanDollar() (*regexNode, error) { + if p.charsRight() == 0 { + return newRegexNodeCh(ntOne, p.options, '$'), nil + } + + ch := p.rightChar(0) + angled := false + backpos := p.textpos() + lastEndPos := backpos + + // Note angle + + if ch == '{' && p.charsRight() > 1 { + angled = true + p.moveRight(1) + ch = p.rightChar(0) + } + + // Try to parse backreference: \1 or \{1} or \{cap} + + if ch >= '0' && ch <= '9' { + if !angled && p.useOptionE() { + capnum := -1 + newcapnum := int(ch - '0') + p.moveRight(1) + if p.isCaptureSlot(newcapnum) { + capnum = newcapnum + lastEndPos = p.textpos() + } + + for p.charsRight() > 0 { + ch = p.rightChar(0) + if ch < '0' || ch > '9' { + break + } + digit := int(ch - '0') + if newcapnum > maxValueDiv10 || (newcapnum == maxValueDiv10 && digit > maxValueMod10) { + return nil, p.getErr(ErrCaptureGroupOutOfRange) + } + + newcapnum = newcapnum*10 + digit + + p.moveRight(1) + if p.isCaptureSlot(newcapnum) { + capnum = newcapnum + lastEndPos = p.textpos() + } + } + p.textto(lastEndPos) + if capnum >= 0 { + return newRegexNodeM(ntRef, p.options, capnum), nil + } + } else { + capnum, err := p.scanDecimal() + if err != nil { + return nil, err + } + if !angled || p.charsRight() > 0 && p.moveRightGetChar() == '}' { + if p.isCaptureSlot(capnum) { + return newRegexNodeM(ntRef, p.options, capnum), nil + } + } + } + } else if angled && IsWordChar(ch) { + capname := p.scanCapname() + + if p.charsRight() > 0 && p.moveRightGetChar() == '}' { + if p.isCaptureName(capname) { + return newRegexNodeM(ntRef, p.options, p.captureSlotFromName(capname)), nil + } + } + } else if !angled { + capnum := 1 + + switch ch { + case '$': + p.moveRight(1) + return newRegexNodeCh(ntOne, p.options, '$'), nil + case '&': + capnum = 0 + case '`': + capnum = replaceLeftPortion + case '\'': + capnum = replaceRightPortion + case '+': + capnum = replaceLastGroup + case '_': + capnum = replaceWholeString + } + + if capnum != 1 { + p.moveRight(1) + return newRegexNodeM(ntRef, p.options, capnum), nil + } + } + + // unrecognized $: literalize + + p.textto(backpos) + return newRegexNodeCh(ntOne, p.options, '$'), nil +} + +// scanGroupOpen scans chars following a '(' (not counting the '('), and returns +// a RegexNode for the type of group scanned, or nil if the group +// simply changed options (?cimsx-cimsx) or was a comment (#...). 
+func (p *parser) scanGroupOpen() (*regexNode, error) { + var ch rune + var nt nodeType + var err error + close := '>' + start := p.textpos() + + // just return a RegexNode if we have: + // 1. "(" followed by nothing + // 2. "(x" where x != ? + // 3. "(?)" + if p.charsRight() == 0 || p.rightChar(0) != '?' || (p.rightChar(0) == '?' && (p.charsRight() > 1 && p.rightChar(1) == ')')) { + if p.useOptionN() || p.ignoreNextParen { + p.ignoreNextParen = false + return newRegexNode(ntGroup, p.options), nil + } + return newRegexNodeMN(ntCapture, p.options, p.consumeAutocap(), -1), nil + } + + p.moveRight(1) + + for { + if p.charsRight() == 0 { + break + } + + switch ch = p.moveRightGetChar(); ch { + case ':': + nt = ntGroup + + case '=': + p.options &= ^RightToLeft + nt = ntRequire + + case '!': + p.options &= ^RightToLeft + nt = ntPrevent + + case '>': + nt = ntGreedy + + case '\'': + close = '\'' + fallthrough + + case '<': + if p.charsRight() == 0 { + goto BreakRecognize + } + + switch ch = p.moveRightGetChar(); ch { + case '=': + if close == '\'' { + goto BreakRecognize + } + + p.options |= RightToLeft + nt = ntRequire + + case '!': + if close == '\'' { + goto BreakRecognize + } + + p.options |= RightToLeft + nt = ntPrevent + + default: + p.moveLeft() + capnum := -1 + uncapnum := -1 + proceed := false + + // grab part before - + + if ch >= '0' && ch <= '9' { + if capnum, err = p.scanDecimal(); err != nil { + return nil, err + } + + if !p.isCaptureSlot(capnum) { + capnum = -1 + } + + // check if we have bogus characters after the number + if p.charsRight() > 0 && !(p.rightChar(0) == close || p.rightChar(0) == '-') { + return nil, p.getErr(ErrInvalidGroupName) + } + if capnum == 0 { + return nil, p.getErr(ErrCapNumNotZero) + } + } else if IsWordChar(ch) { + capname := p.scanCapname() + + if p.isCaptureName(capname) { + capnum = p.captureSlotFromName(capname) + } + + // check if we have bogus character after the name + if p.charsRight() > 0 && !(p.rightChar(0) == close || p.rightChar(0) == '-') { + return nil, p.getErr(ErrInvalidGroupName) + } + } else if ch == '-' { + proceed = true + } else { + // bad group name - starts with something other than a word character and isn't a number + return nil, p.getErr(ErrInvalidGroupName) + } + + // grab part after - if any + + if (capnum != -1 || proceed == true) && p.charsRight() > 0 && p.rightChar(0) == '-' { + p.moveRight(1) + + //no more chars left, no closing char, etc + if p.charsRight() == 0 { + return nil, p.getErr(ErrInvalidGroupName) + } + + ch = p.rightChar(0) + if ch >= '0' && ch <= '9' { + if uncapnum, err = p.scanDecimal(); err != nil { + return nil, err + } + + if !p.isCaptureSlot(uncapnum) { + return nil, p.getErr(ErrUndefinedBackRef, uncapnum) + } + + // check if we have bogus characters after the number + if p.charsRight() > 0 && p.rightChar(0) != close { + return nil, p.getErr(ErrInvalidGroupName) + } + } else if IsWordChar(ch) { + uncapname := p.scanCapname() + + if !p.isCaptureName(uncapname) { + return nil, p.getErr(ErrUndefinedNameRef, uncapname) + } + uncapnum = p.captureSlotFromName(uncapname) + + // check if we have bogus character after the name + if p.charsRight() > 0 && p.rightChar(0) != close { + return nil, p.getErr(ErrInvalidGroupName) + } + } else { + // bad group name - starts with something other than a word character and isn't a number + return nil, p.getErr(ErrInvalidGroupName) + } + } + + // actually make the node + + if (capnum != -1 || uncapnum != -1) && p.charsRight() > 0 && p.moveRightGetChar() == close { + return 
newRegexNodeMN(ntCapture, p.options, capnum, uncapnum), nil + } + goto BreakRecognize + } + + case '(': + // alternation construct (?(...) | ) + + parenPos := p.textpos() + if p.charsRight() > 0 { + ch = p.rightChar(0) + + // check if the alternation condition is a backref + if ch >= '0' && ch <= '9' { + var capnum int + if capnum, err = p.scanDecimal(); err != nil { + return nil, err + } + if p.charsRight() > 0 && p.moveRightGetChar() == ')' { + if p.isCaptureSlot(capnum) { + return newRegexNodeM(ntTestref, p.options, capnum), nil + } + return nil, p.getErr(ErrUndefinedReference, capnum) + } + + return nil, p.getErr(ErrMalformedReference, capnum) + + } else if IsWordChar(ch) { + capname := p.scanCapname() + + if p.isCaptureName(capname) && p.charsRight() > 0 && p.moveRightGetChar() == ')' { + return newRegexNodeM(ntTestref, p.options, p.captureSlotFromName(capname)), nil + } + } + } + // not a backref + nt = ntTestgroup + p.textto(parenPos - 1) // jump to the start of the parentheses + p.ignoreNextParen = true // but make sure we don't try to capture the insides + + charsRight := p.charsRight() + if charsRight >= 3 && p.rightChar(1) == '?' { + rightchar2 := p.rightChar(2) + // disallow comments in the condition + if rightchar2 == '#' { + return nil, p.getErr(ErrAlternationCantHaveComment) + } + + // disallow named capture group (?<..>..) in the condition + if rightchar2 == '\'' { + return nil, p.getErr(ErrAlternationCantCapture) + } + + if charsRight >= 4 && (rightchar2 == '<' && p.rightChar(3) != '!' && p.rightChar(3) != '=') { + return nil, p.getErr(ErrAlternationCantCapture) + } + } + + case 'P': + if p.useRE2() { + // support for P syntax + if p.charsRight() < 3 { + goto BreakRecognize + } + + ch = p.moveRightGetChar() + if ch != '<' { + goto BreakRecognize + } + + ch = p.moveRightGetChar() + p.moveLeft() + + if IsWordChar(ch) { + capnum := -1 + capname := p.scanCapname() + + if p.isCaptureName(capname) { + capnum = p.captureSlotFromName(capname) + } + + // check if we have bogus character after the name + if p.charsRight() > 0 && p.rightChar(0) != '>' { + return nil, p.getErr(ErrInvalidGroupName) + } + + // actually make the node + + if capnum != -1 && p.charsRight() > 0 && p.moveRightGetChar() == '>' { + return newRegexNodeMN(ntCapture, p.options, capnum, -1), nil + } + goto BreakRecognize + + } else { + // bad group name - starts with something other than a word character and isn't a number + return nil, p.getErr(ErrInvalidGroupName) + } + } + // if we're not using RE2 compat mode then + // we just behave like normal + fallthrough + + default: + p.moveLeft() + + nt = ntGroup + // disallow options in the children of a testgroup node + if p.group.t != ntTestgroup { + p.scanOptions() + } + if p.charsRight() == 0 { + goto BreakRecognize + } + + if ch = p.moveRightGetChar(); ch == ')' { + return nil, nil + } + + if ch != ':' { + goto BreakRecognize + } + + } + + return newRegexNode(nt, p.options), nil + } + +BreakRecognize: + + // break Recognize comes here + + return nil, p.getErr(ErrUnrecognizedGrouping, string(p.pattern[start:p.textpos()])) +} + +// scans backslash specials and basics +func (p *parser) scanBackslash(scanOnly bool) (*regexNode, error) { + + if p.charsRight() == 0 { + return nil, p.getErr(ErrIllegalEndEscape) + } + + switch ch := p.rightChar(0); ch { + case 'b', 'B', 'A', 'G', 'Z', 'z': + p.moveRight(1) + return newRegexNode(p.typeFromCode(ch), p.options), nil + + case 'w': + p.moveRight(1) + if p.useOptionE() || p.useRE2() { + return newRegexNodeSet(ntSet, p.options, 
ECMAWordClass()), nil
+ }
+ return newRegexNodeSet(ntSet, p.options, WordClass()), nil
+
+ case 'W':
+ p.moveRight(1)
+ if p.useOptionE() || p.useRE2() {
+ return newRegexNodeSet(ntSet, p.options, NotECMAWordClass()), nil
+ }
+ return newRegexNodeSet(ntSet, p.options, NotWordClass()), nil
+
+ case 's':
+ p.moveRight(1)
+ if p.useOptionE() {
+ return newRegexNodeSet(ntSet, p.options, ECMASpaceClass()), nil
+ } else if p.useRE2() {
+ return newRegexNodeSet(ntSet, p.options, RE2SpaceClass()), nil
+ }
+ return newRegexNodeSet(ntSet, p.options, SpaceClass()), nil
+
+ case 'S':
+ p.moveRight(1)
+ if p.useOptionE() {
+ return newRegexNodeSet(ntSet, p.options, NotECMASpaceClass()), nil
+ } else if p.useRE2() {
+ return newRegexNodeSet(ntSet, p.options, NotRE2SpaceClass()), nil
+ }
+ return newRegexNodeSet(ntSet, p.options, NotSpaceClass()), nil
+
+ case 'd':
+ p.moveRight(1)
+ if p.useOptionE() || p.useRE2() {
+ return newRegexNodeSet(ntSet, p.options, ECMADigitClass()), nil
+ }
+ return newRegexNodeSet(ntSet, p.options, DigitClass()), nil
+
+ case 'D':
+ p.moveRight(1)
+ if p.useOptionE() || p.useRE2() {
+ return newRegexNodeSet(ntSet, p.options, NotECMADigitClass()), nil
+ }
+ return newRegexNodeSet(ntSet, p.options, NotDigitClass()), nil
+
+ case 'p', 'P':
+ p.moveRight(1)
+ prop, err := p.parseProperty()
+ if err != nil {
+ return nil, err
+ }
+ cc := &CharSet{}
+ cc.addCategory(prop, (ch != 'p'), p.useOptionI(), p.patternRaw)
+ if p.useOptionI() {
+ cc.addLowercase()
+ }
+
+ return newRegexNodeSet(ntSet, p.options, cc), nil
+
+ default:
+ return p.scanBasicBackslash(scanOnly)
+ }
+}
+
+// Scans \-style backreferences and character escapes
+func (p *parser) scanBasicBackslash(scanOnly bool) (*regexNode, error) {
+ if p.charsRight() == 0 {
+ return nil, p.getErr(ErrIllegalEndEscape)
+ }
+ angled := false
+ k := false
+ close := '\x00'
+
+ backpos := p.textpos()
+ ch := p.rightChar(0)
+
+ // Allow \k<name> instead of \<name>, which is now deprecated.
+
+ // According to ECMAScript specification, \k<name> is only parsed as a named group reference if
+ // there is at least one group name in the regexp.
+ // See https://www.ecma-international.org/ecma-262/#sec-isvalidregularexpressionliteral, step 7.
+ // Note, during the first (scanOnly) run we may not have all group names scanned, but that's ok.
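+ // The forms recognized below are \k<name>, \k'name' (non-ECMAScript only), and
+ // the deprecated \<name> / \'name' (likewise non-ECMAScript only).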
+ if ch == 'k' && (!p.useOptionE() || len(p.capnames) > 0) {
+ if p.charsRight() >= 2 {
+ p.moveRight(1)
+ ch = p.moveRightGetChar()
+
+ if ch == '<' || (!p.useOptionE() && ch == '\'') { // No support for \k'name' in ECMAScript
+ angled = true
+ if ch == '\'' {
+ close = '\''
+ } else {
+ close = '>'
+ }
+ }
+ }
+
+ if !angled || p.charsRight() <= 0 {
+ return nil, p.getErr(ErrMalformedNameRef)
+ }
+
+ ch = p.rightChar(0)
+ k = true
+
+ } else if !p.useOptionE() && (ch == '<' || ch == '\'') && p.charsRight() > 1 { // Note angle without \g
+ angled = true
+ if ch == '\'' {
+ close = '\''
+ } else {
+ close = '>'
+ }
+
+ p.moveRight(1)
+ ch = p.rightChar(0)
+ }
+
+ // Try to parse backreference: \<1> or \<cap>
+
+ if angled && ch >= '0' && ch <= '9' {
+ capnum, err := p.scanDecimal()
+ if err != nil {
+ return nil, err
+ }
+
+ if p.charsRight() > 0 && p.moveRightGetChar() == close {
+ if p.isCaptureSlot(capnum) {
+ return newRegexNodeM(ntRef, p.options, capnum), nil
+ }
+ return nil, p.getErr(ErrUndefinedBackRef, capnum)
+ }
+ } else if !angled && ch >= '1' && ch <= '9' { // Try to parse backreference or octal: \1
+ capnum, err := p.scanDecimal()
+ if err != nil {
+ return nil, err
+ }
+
+ if scanOnly {
+ return nil, nil
+ }
+
+ if p.isCaptureSlot(capnum) {
+ return newRegexNodeM(ntRef, p.options, capnum), nil
+ }
+ if capnum <= 9 && !p.useOptionE() {
+ return nil, p.getErr(ErrUndefinedBackRef, capnum)
+ }
+
+ } else if angled {
+ capname := p.scanCapname()
+
+ if capname != "" && p.charsRight() > 0 && p.moveRightGetChar() == close {
+
+ if scanOnly {
+ return nil, nil
+ }
+
+ if p.isCaptureName(capname) {
+ return newRegexNodeM(ntRef, p.options, p.captureSlotFromName(capname)), nil
+ }
+ return nil, p.getErr(ErrUndefinedNameRef, capname)
+ } else {
+ if k {
+ return nil, p.getErr(ErrMalformedNameRef)
+ }
+ }
+ }
+
+ // Not backreference: must be char code
+
+ p.textto(backpos)
+ ch, err := p.scanCharEscape()
+ if err != nil {
+ return nil, err
+ }
+
+ if scanOnly {
+ return nil, nil
+ }
+
+ if p.useOptionI() {
+ ch = unicode.ToLower(ch)
+ }
+
+ return newRegexNodeCh(ntOne, p.options, ch), nil
+}
+
+// Scans X for \p{X} or \P{X}
+func (p *parser) parseProperty() (string, error) {
+ // RE2 and PCRE support \pX syntax (no {} and only 1 letter unicode cats supported)
+ // since this is purely additive syntax it's not behind a flag
+ if p.charsRight() >= 1 && p.rightChar(0) != '{' {
+ ch := string(p.moveRightGetChar())
+ // check if it's a valid cat
+ if !isValidUnicodeCat(ch) {
+ return "", p.getErr(ErrUnknownSlashP, ch)
+ }
+ return ch, nil
+ }
+
+ if p.charsRight() < 3 {
+ return "", p.getErr(ErrIncompleteSlashP)
+ }
+ ch := p.moveRightGetChar()
+ if ch != '{' {
+ return "", p.getErr(ErrMalformedSlashP)
+ }
+
+ startpos := p.textpos()
+ for p.charsRight() > 0 {
+ ch = p.moveRightGetChar()
+ if !(IsWordChar(ch) || ch == '-') {
+ p.moveLeft()
+ break
+ }
+ }
+ capname := string(p.pattern[startpos:p.textpos()])
+
+ if p.charsRight() == 0 || p.moveRightGetChar() != '}' {
+ return "", p.getErr(ErrIncompleteSlashP)
+ }
+
+ if !isValidUnicodeCat(capname) {
+ return "", p.getErr(ErrUnknownSlashP, capname)
+ }
+
+ return capname, nil
+}
+
+// Returns ReNode type for zero-length assertions with a \ code.
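+// For example, \b maps to ntBoundary (ntECMABoundary under ECMAScript), \A to
+// ntBeginning, \z to ntEnd, and an unrecognized code to ntNothing.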
+func (p *parser) typeFromCode(ch rune) nodeType { + switch ch { + case 'b': + if p.useOptionE() { + return ntECMABoundary + } + return ntBoundary + case 'B': + if p.useOptionE() { + return ntNonECMABoundary + } + return ntNonboundary + case 'A': + return ntBeginning + case 'G': + return ntStart + case 'Z': + return ntEndZ + case 'z': + return ntEnd + default: + return ntNothing + } +} + +// Scans whitespace or x-mode comments. +func (p *parser) scanBlank() error { + if p.useOptionX() { + for { + for p.charsRight() > 0 && isSpace(p.rightChar(0)) { + p.moveRight(1) + } + + if p.charsRight() == 0 { + break + } + + if p.rightChar(0) == '#' { + for p.charsRight() > 0 && p.rightChar(0) != '\n' { + p.moveRight(1) + } + } else if p.charsRight() >= 3 && p.rightChar(2) == '#' && + p.rightChar(1) == '?' && p.rightChar(0) == '(' { + for p.charsRight() > 0 && p.rightChar(0) != ')' { + p.moveRight(1) + } + if p.charsRight() == 0 { + return p.getErr(ErrUnterminatedComment) + } + p.moveRight(1) + } else { + break + } + } + } else { + for { + if p.charsRight() < 3 || p.rightChar(2) != '#' || + p.rightChar(1) != '?' || p.rightChar(0) != '(' { + return nil + } + + for p.charsRight() > 0 && p.rightChar(0) != ')' { + p.moveRight(1) + } + if p.charsRight() == 0 { + return p.getErr(ErrUnterminatedComment) + } + p.moveRight(1) + } + } + return nil +} + +func (p *parser) scanCapname() string { + startpos := p.textpos() + + for p.charsRight() > 0 { + if !IsWordChar(p.moveRightGetChar()) { + p.moveLeft() + break + } + } + + return string(p.pattern[startpos:p.textpos()]) +} + +// Scans contents of [] (not including []'s), and converts to a set. +func (p *parser) scanCharSet(caseInsensitive, scanOnly bool) (*CharSet, error) { + ch := '\x00' + chPrev := '\x00' + inRange := false + firstChar := true + closed := false + + var cc *CharSet + if !scanOnly { + cc = &CharSet{} + } + + if p.charsRight() > 0 && p.rightChar(0) == '^' { + p.moveRight(1) + if !scanOnly { + cc.negate = true + } + } + + for ; p.charsRight() > 0; firstChar = false { + fTranslatedChar := false + ch = p.moveRightGetChar() + if ch == ']' { + if !firstChar { + closed = true + break + } else if p.useOptionE() { + if !scanOnly { + cc.addRanges(NoneClass().ranges) + } + closed = true + break + } + + } else if ch == '\\' && p.charsRight() > 0 { + switch ch = p.moveRightGetChar(); ch { + case 'D', 'd': + if !scanOnly { + if inRange { + return nil, p.getErr(ErrBadClassInCharRange, ch) + } + cc.addDigit(p.useOptionE() || p.useRE2(), ch == 'D', p.patternRaw) + } + continue + + case 'S', 's': + if !scanOnly { + if inRange { + return nil, p.getErr(ErrBadClassInCharRange, ch) + } + cc.addSpace(p.useOptionE(), p.useRE2(), ch == 'S') + } + continue + + case 'W', 'w': + if !scanOnly { + if inRange { + return nil, p.getErr(ErrBadClassInCharRange, ch) + } + + cc.addWord(p.useOptionE() || p.useRE2(), ch == 'W') + } + continue + + case 'p', 'P': + if !scanOnly { + if inRange { + return nil, p.getErr(ErrBadClassInCharRange, ch) + } + prop, err := p.parseProperty() + if err != nil { + return nil, err + } + cc.addCategory(prop, (ch != 'p'), caseInsensitive, p.patternRaw) + } else { + p.parseProperty() + } + + continue + + case '-': + if !scanOnly { + cc.addRange(ch, ch) + } + continue + + default: + p.moveLeft() + var err error + ch, err = p.scanCharEscape() // non-literal character + if err != nil { + return nil, err + } + fTranslatedChar = true + break // this break will only break out of the switch + } + } else if ch == '[' { + // This is code for Posix style properties 
- [:Ll:] or [:IsTibetan:]. + // It currently doesn't do anything other than skip the whole thing! + if p.charsRight() > 0 && p.rightChar(0) == ':' && !inRange { + savePos := p.textpos() + + p.moveRight(1) + negate := false + if p.charsRight() > 1 && p.rightChar(0) == '^' { + negate = true + p.moveRight(1) + } + + nm := p.scanCapname() // snag the name + if !scanOnly && p.useRE2() { + // look up the name since these are valid for RE2 + // add the group based on the name + if ok := cc.addNamedASCII(nm, negate); !ok { + return nil, p.getErr(ErrInvalidCharRange) + } + } + if p.charsRight() < 2 || p.moveRightGetChar() != ':' || p.moveRightGetChar() != ']' { + p.textto(savePos) + } else if p.useRE2() { + // move on + continue + } + } + } + + if inRange { + inRange = false + if !scanOnly { + if ch == '[' && !fTranslatedChar && !firstChar { + // We thought we were in a range, but we're actually starting a subtraction. + // In that case, we'll add chPrev to our char class, skip the opening [, and + // scan the new character class recursively. + cc.addChar(chPrev) + sub, err := p.scanCharSet(caseInsensitive, false) + if err != nil { + return nil, err + } + cc.addSubtraction(sub) + + if p.charsRight() > 0 && p.rightChar(0) != ']' { + return nil, p.getErr(ErrSubtractionMustBeLast) + } + } else { + // a regular range, like a-z + if chPrev > ch { + return nil, p.getErr(ErrReversedCharRange, chPrev, ch) + } + cc.addRange(chPrev, ch) + } + } + } else if p.charsRight() >= 2 && p.rightChar(0) == '-' && p.rightChar(1) != ']' { + // this could be the start of a range + chPrev = ch + inRange = true + p.moveRight(1) + } else if p.charsRight() >= 1 && ch == '-' && !fTranslatedChar && p.rightChar(0) == '[' && !firstChar { + // we aren't in a range, and now there is a subtraction. Usually this happens + // only when a subtraction follows a range, like [a-z-[b]] + if !scanOnly { + p.moveRight(1) + sub, err := p.scanCharSet(caseInsensitive, false) + if err != nil { + return nil, err + } + cc.addSubtraction(sub) + + if p.charsRight() > 0 && p.rightChar(0) != ']' { + return nil, p.getErr(ErrSubtractionMustBeLast) + } + } else { + p.moveRight(1) + p.scanCharSet(caseInsensitive, true) + } + } else { + if !scanOnly { + cc.addRange(ch, ch) + } + } + } + + if !closed { + return nil, p.getErr(ErrUnterminatedBracket) + } + + if !scanOnly && caseInsensitive { + cc.addLowercase() + } + + return cc, nil +} + +// Scans any number of decimal digits (pegs value at 2^31-1 if too large) +func (p *parser) scanDecimal() (int, error) { + i := 0 + var d int + + for p.charsRight() > 0 { + d = int(p.rightChar(0) - '0') + if d < 0 || d > 9 { + break + } + p.moveRight(1) + + if i > maxValueDiv10 || (i == maxValueDiv10 && d > maxValueMod10) { + return 0, p.getErr(ErrCaptureGroupOutOfRange) + } + + i *= 10 + i += d + } + + return int(i), nil +} + +// Returns true for options allowed only at the top level +func isOnlyTopOption(option RegexOptions) bool { + return option == RightToLeft || option == ECMAScript || option == RE2 +} + +// Scans cimsx-cimsx option string, stops at the first unrecognized char. +func (p *parser) scanOptions() { + + for off := false; p.charsRight() > 0; p.moveRight(1) { + ch := p.rightChar(0) + + if ch == '-' { + off = true + } else if ch == '+' { + off = false + } else { + option := optionFromCode(ch) + if option == 0 || isOnlyTopOption(option) { + return + } + + if off { + p.options &= ^option + } else { + p.options |= option + } + } + } +} + +// Scans \ code for escape codes that map to single unicode chars. 
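+// For example, \n yields '\n', \x41 and \u0041 both yield 'A', and \cA yields
+// the control character 0x01.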
+func (p *parser) scanCharEscape() (r rune, err error) {
+
+ ch := p.moveRightGetChar()
+
+ if ch >= '0' && ch <= '7' {
+ p.moveLeft()
+ return p.scanOctal(), nil
+ }
+
+ pos := p.textpos()
+
+ switch ch {
+ case 'x':
+ // support for \x{HEX} syntax from Perl and PCRE
+ if p.charsRight() > 0 && p.rightChar(0) == '{' {
+ if p.useOptionE() {
+ return ch, nil
+ }
+ p.moveRight(1)
+ return p.scanHexUntilBrace()
+ } else {
+ r, err = p.scanHex(2)
+ }
+ case 'u':
+ // ECMAScript supports \u{HEX} only if `u` is also set
+ if p.useOptionE() && p.useOptionU() && p.charsRight() > 0 && p.rightChar(0) == '{' {
+ p.moveRight(1)
+ return p.scanHexUntilBrace()
+ } else {
+ r, err = p.scanHex(4)
+ }
+ case 'a':
+ return '\u0007', nil
+ case 'b':
+ return '\b', nil
+ case 'e':
+ return '\u001B', nil
+ case 'f':
+ return '\f', nil
+ case 'n':
+ return '\n', nil
+ case 'r':
+ return '\r', nil
+ case 't':
+ return '\t', nil
+ case 'v':
+ return '\u000B', nil
+ case 'c':
+ r, err = p.scanControl()
+ default:
+ if !p.useOptionE() && !p.useRE2() && IsWordChar(ch) {
+ return 0, p.getErr(ErrUnrecognizedEscape, string(ch))
+ }
+ return ch, nil
+ }
+ if err != nil && p.useOptionE() {
+ p.textto(pos)
+ return ch, nil
+ }
+ return
+}
+
+// Grabs and converts an ascii control character
+func (p *parser) scanControl() (rune, error) {
+ if p.charsRight() <= 0 {
+ return 0, p.getErr(ErrMissingControl)
+ }
+
+ ch := p.moveRightGetChar()
+
+ // \ca interpreted as \cA
+
+ if ch >= 'a' && ch <= 'z' {
+ ch = (ch - ('a' - 'A'))
+ }
+ ch = (ch - '@')
+ if ch >= 0 && ch < ' ' {
+ return ch, nil
+ }
+
+ return 0, p.getErr(ErrUnrecognizedControl)
+
+}
+
+// Scan hex digits until we hit a closing brace.
+// Non-hex digits, hex value too large for UTF-8, or running out of chars are errors
+func (p *parser) scanHexUntilBrace() (rune, error) {
+ // PCRE spec reads like unlimited hex digits are allowed, but unicode has a limit
+ // so we can enforce that
+ i := 0
+ hasContent := false
+
+ for p.charsRight() > 0 {
+ ch := p.moveRightGetChar()
+ if ch == '}' {
+ // hit our close brace, we're done here
+ // prevent \x{}
+ if !hasContent {
+ return 0, p.getErr(ErrTooFewHex)
+ }
+ return rune(i), nil
+ }
+ hasContent = true
+ // no brace needs to be hex digit
+ d := hexDigit(ch)
+ if d < 0 {
+ return 0, p.getErr(ErrMissingBrace)
+ }
+
+ i *= 0x10
+ i += d
+
+ if i > unicode.MaxRune {
+ return 0, p.getErr(ErrInvalidHex)
+ }
+ }
+
+ // we only make it here if we run out of digits without finding the brace
+ return 0, p.getErr(ErrMissingBrace)
+}
+
+// Scans exactly c hex digits (c=2 for \xFF, c=4 for \uFFFF)
+func (p *parser) scanHex(c int) (rune, error) {
+
+ i := 0
+
+ if p.charsRight() >= c {
+ for c > 0 {
+ d := hexDigit(p.moveRightGetChar())
+ if d < 0 {
+ break
+ }
+ i *= 0x10
+ i += d
+ c--
+ }
+ }
+
+ if c > 0 {
+ return 0, p.getErr(ErrTooFewHex)
+ }
+
+ return rune(i), nil
+}
+
+// Returns n <= 0xF for a hex digit.
+func hexDigit(ch rune) int {
+
+ if d := uint(ch - '0'); d <= 9 {
+ return int(d)
+ }
+
+ if d := uint(ch - 'a'); d <= 5 {
+ return int(d + 0xa)
+ }
+
+ if d := uint(ch - 'A'); d <= 5 {
+ return int(d + 0xa)
+ }
+
+ return -1
+}
+
+// Scans up to three octal digits (stops before exceeding 0377).
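+// For example, \101 yields 'A' (octal 101 = 0x41); anything above 0377 is
+// truncated to its low 8 bits, matching Perl's behavior.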
+func (p *parser) scanOctal() rune { + // Consume octal chars only up to 3 digits and value 0377 + + c := 3 + + if c > p.charsRight() { + c = p.charsRight() + } + + //we know the first char is good because the caller had to check + i := 0 + d := int(p.rightChar(0) - '0') + for c > 0 && d <= 7 && d >= 0 { + if i >= 0x20 && p.useOptionE() { + break + } + i *= 8 + i += d + c-- + + p.moveRight(1) + if !p.rightMost() { + d = int(p.rightChar(0) - '0') + } + } + + // Octal codes only go up to 255. Any larger and the behavior that Perl follows + // is simply to truncate the high bits. + i &= 0xFF + + return rune(i) +} + +// Returns the current parsing position. +func (p *parser) textpos() int { + return p.currentPos +} + +// Zaps to a specific parsing position. +func (p *parser) textto(pos int) { + p.currentPos = pos +} + +// Returns the char at the right of the current parsing position and advances to the right. +func (p *parser) moveRightGetChar() rune { + ch := p.pattern[p.currentPos] + p.currentPos++ + return ch +} + +// Moves the current position to the right. +func (p *parser) moveRight(i int) { + // default would be 1 + p.currentPos += i +} + +// Moves the current parsing position one to the left. +func (p *parser) moveLeft() { + p.currentPos-- +} + +// Returns the char left of the current parsing position. +func (p *parser) charAt(i int) rune { + return p.pattern[i] +} + +// Returns the char i chars right of the current parsing position. +func (p *parser) rightChar(i int) rune { + // default would be 0 + return p.pattern[p.currentPos+i] +} + +// Number of characters to the right of the current parsing position. +func (p *parser) charsRight() int { + return len(p.pattern) - p.currentPos +} + +func (p *parser) rightMost() bool { + return p.currentPos == len(p.pattern) +} + +// Looks up the slot number for a given name +func (p *parser) captureSlotFromName(capname string) int { + return p.capnames[capname] +} + +// True if the capture slot was noted +func (p *parser) isCaptureSlot(i int) bool { + if p.caps != nil { + _, ok := p.caps[i] + return ok + } + + return (i >= 0 && i < p.capsize) +} + +// Looks up the slot number for a given name +func (p *parser) isCaptureName(capname string) bool { + if p.capnames == nil { + return false + } + + _, ok := p.capnames[capname] + return ok +} + +// option shortcuts + +// True if N option disabling '(' autocapture is on. +func (p *parser) useOptionN() bool { + return (p.options & ExplicitCapture) != 0 +} + +// True if I option enabling case-insensitivity is on. +func (p *parser) useOptionI() bool { + return (p.options & IgnoreCase) != 0 +} + +// True if M option altering meaning of $ and ^ is on. +func (p *parser) useOptionM() bool { + return (p.options & Multiline) != 0 +} + +// True if S option altering meaning of . is on. +func (p *parser) useOptionS() bool { + return (p.options & Singleline) != 0 +} + +// True if X option enabling whitespace/comment mode is on. +func (p *parser) useOptionX() bool { + return (p.options & IgnorePatternWhitespace) != 0 +} + +// True if E option enabling ECMAScript behavior on. +func (p *parser) useOptionE() bool { + return (p.options & ECMAScript) != 0 +} + +// true to use RE2 compatibility parsing behavior. +func (p *parser) useRE2() bool { + return (p.options & RE2) != 0 +} + +// True if U option enabling ECMAScript's Unicode behavior on. +func (p *parser) useOptionU() bool { + return (p.options & Unicode) != 0 +} + +// True if options stack is empty. 
+func (p *parser) emptyOptionsStack() bool { + return len(p.optionsStack) == 0 +} + +// Finish the current quantifiable (when a quantifier is not found or is not possible) +func (p *parser) addConcatenate() { + // The first (| inside a Testgroup group goes directly to the group + p.concatenation.addChild(p.unit) + p.unit = nil +} + +// Finish the current quantifiable (when a quantifier is found) +func (p *parser) addConcatenate3(lazy bool, min, max int) { + p.concatenation.addChild(p.unit.makeQuantifier(lazy, min, max)) + p.unit = nil +} + +// Sets the current unit to a single char node +func (p *parser) addUnitOne(ch rune) { + if p.useOptionI() { + ch = unicode.ToLower(ch) + } + + p.unit = newRegexNodeCh(ntOne, p.options, ch) +} + +// Sets the current unit to a single inverse-char node +func (p *parser) addUnitNotone(ch rune) { + if p.useOptionI() { + ch = unicode.ToLower(ch) + } + + p.unit = newRegexNodeCh(ntNotone, p.options, ch) +} + +// Sets the current unit to a single set node +func (p *parser) addUnitSet(set *CharSet) { + p.unit = newRegexNodeSet(ntSet, p.options, set) +} + +// Sets the current unit to a subtree +func (p *parser) addUnitNode(node *regexNode) { + p.unit = node +} + +// Sets the current unit to an assertion of the specified type +func (p *parser) addUnitType(t nodeType) { + p.unit = newRegexNode(t, p.options) +} + +// Finish the current group (in response to a ')' or end) +func (p *parser) addGroup() error { + if p.group.t == ntTestgroup || p.group.t == ntTestref { + p.group.addChild(p.concatenation.reverseLeft()) + if (p.group.t == ntTestref && len(p.group.children) > 2) || len(p.group.children) > 3 { + return p.getErr(ErrTooManyAlternates) + } + } else { + p.alternation.addChild(p.concatenation.reverseLeft()) + p.group.addChild(p.alternation) + } + + p.unit = p.group + return nil +} + +// Pops the option stack, but keeps the current options unchanged. +func (p *parser) popKeepOptions() { + lastIdx := len(p.optionsStack) - 1 + p.optionsStack = p.optionsStack[:lastIdx] +} + +// Recalls options from the stack. +func (p *parser) popOptions() { + lastIdx := len(p.optionsStack) - 1 + // get the last item on the stack and then remove it by reslicing + p.options = p.optionsStack[lastIdx] + p.optionsStack = p.optionsStack[:lastIdx] +} + +// Saves options on a stack. +func (p *parser) pushOptions() { + p.optionsStack = append(p.optionsStack, p.options) +} + +// Add a string to the last concatenate. +func (p *parser) addToConcatenate(pos, cch int, isReplacement bool) { + var node *regexNode + + if cch == 0 { + return + } + + if cch > 1 { + str := make([]rune, cch) + copy(str, p.pattern[pos:pos+cch]) + + if p.useOptionI() && !isReplacement { + // We do the ToLower character by character for consistency. With surrogate chars, doing + // a ToLower on the entire string could actually change the surrogate pair. This is more correct + // linguistically, but since Regex doesn't support surrogates, it's more important to be + // consistent. 
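+ // (each rune is lowered individually here, mirroring addUnitOne/addUnitNotone)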
+ for i := 0; i < len(str); i++ { + str[i] = unicode.ToLower(str[i]) + } + } + + node = newRegexNodeStr(ntMulti, p.options, str) + } else { + ch := p.charAt(pos) + + if p.useOptionI() && !isReplacement { + ch = unicode.ToLower(ch) + } + + node = newRegexNodeCh(ntOne, p.options, ch) + } + + p.concatenation.addChild(node) +} + +// Push the parser state (in response to an open paren) +func (p *parser) pushGroup() { + p.group.next = p.stack + p.alternation.next = p.group + p.concatenation.next = p.alternation + p.stack = p.concatenation +} + +// Remember the pushed state (in response to a ')') +func (p *parser) popGroup() error { + p.concatenation = p.stack + p.alternation = p.concatenation.next + p.group = p.alternation.next + p.stack = p.group.next + + // The first () inside a Testgroup group goes directly to the group + if p.group.t == ntTestgroup && len(p.group.children) == 0 { + if p.unit == nil { + return p.getErr(ErrConditionalExpression) + } + + p.group.addChild(p.unit) + p.unit = nil + } + return nil +} + +// True if the group stack is empty. +func (p *parser) emptyStack() bool { + return p.stack == nil +} + +// Start a new round for the parser state (in response to an open paren or string start) +func (p *parser) startGroup(openGroup *regexNode) { + p.group = openGroup + p.alternation = newRegexNode(ntAlternate, p.options) + p.concatenation = newRegexNode(ntConcatenate, p.options) +} + +// Finish the current concatenation (in response to a |) +func (p *parser) addAlternate() { + // The | parts inside a Testgroup group go directly to the group + + if p.group.t == ntTestgroup || p.group.t == ntTestref { + p.group.addChild(p.concatenation.reverseLeft()) + } else { + p.alternation.addChild(p.concatenation.reverseLeft()) + } + + p.concatenation = newRegexNode(ntConcatenate, p.options) +} + +// For categorizing ascii characters. + +const ( + Q byte = 5 // quantifier + S = 4 // ordinary stopper + Z = 3 // ScanBlank stopper + X = 2 // whitespace + E = 1 // should be escaped +) + +var _category = []byte{ + //01 2 3 4 5 6 7 8 9 A B C D E F 0 1 2 3 4 5 6 7 8 9 A B C D E F + 0, 0, 0, 0, 0, 0, 0, 0, 0, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? + X, 0, 0, Z, S, 0, 0, 0, S, S, Q, Q, 0, 0, S, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Q, + //@A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, S, 0, + //'a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Q, S, 0, 0, 0, +} + +func isSpace(ch rune) bool { + return (ch <= ' ' && _category[ch] == X) +} + +// Returns true for those characters that terminate a string of ordinary chars. +func isSpecial(ch rune) bool { + return (ch <= '|' && _category[ch] >= S) +} + +// Returns true for those characters that terminate a string of ordinary chars. +func isStopperX(ch rune) bool { + return (ch <= '|' && _category[ch] >= X) +} + +// Returns true for those characters that begin a quantifier. 
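+// That is '*', '+', '?' and '{', the entries marked Q in _category above.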
+func isQuantifier(ch rune) bool { + return (ch <= '{' && _category[ch] >= Q) +} + +func (p *parser) isTrueQuantifier() bool { + nChars := p.charsRight() + if nChars == 0 { + return false + } + + startpos := p.textpos() + ch := p.charAt(startpos) + if ch != '{' { + return ch <= '{' && _category[ch] >= Q + } + + //UGLY: this is ugly -- the original code was ugly too + pos := startpos + for { + nChars-- + if nChars <= 0 { + break + } + pos++ + ch = p.charAt(pos) + if ch < '0' || ch > '9' { + break + } + } + + if nChars == 0 || pos-startpos == 1 { + return false + } + if ch == '}' { + return true + } + if ch != ',' { + return false + } + for { + nChars-- + if nChars <= 0 { + break + } + pos++ + ch = p.charAt(pos) + if ch < '0' || ch > '9' { + break + } + } + + return nChars > 0 && ch == '}' +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/prefix.go b/vendor/github.com/dlclark/regexp2/syntax/prefix.go new file mode 100644 index 0000000000..f671688629 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/prefix.go @@ -0,0 +1,896 @@ +package syntax + +import ( + "bytes" + "fmt" + "strconv" + "unicode" + "unicode/utf8" +) + +type Prefix struct { + PrefixStr []rune + PrefixSet CharSet + CaseInsensitive bool +} + +// It takes a RegexTree and computes the set of chars that can start it. +func getFirstCharsPrefix(tree *RegexTree) *Prefix { + s := regexFcd{ + fcStack: make([]regexFc, 32), + intStack: make([]int, 32), + } + fc := s.regexFCFromRegexTree(tree) + + if fc == nil || fc.nullable || fc.cc.IsEmpty() { + return nil + } + fcSet := fc.getFirstChars() + return &Prefix{PrefixSet: fcSet, CaseInsensitive: fc.caseInsensitive} +} + +type regexFcd struct { + intStack []int + intDepth int + fcStack []regexFc + fcDepth int + skipAllChildren bool // don't process any more children at the current level + skipchild bool // don't process the current child. + failed bool +} + +/* + * The main FC computation. It does a shortcutted depth-first walk + * through the tree and calls CalculateFC to emits code before + * and after each child of an interior node, and at each leaf. + */ +func (s *regexFcd) regexFCFromRegexTree(tree *RegexTree) *regexFc { + curNode := tree.root + curChild := 0 + + for { + if len(curNode.children) == 0 { + // This is a leaf node + s.calculateFC(curNode.t, curNode, 0) + } else if curChild < len(curNode.children) && !s.skipAllChildren { + // This is an interior node, and we have more children to analyze + s.calculateFC(curNode.t|beforeChild, curNode, curChild) + + if !s.skipchild { + curNode = curNode.children[curChild] + // this stack is how we get a depth first walk of the tree. + s.pushInt(curChild) + curChild = 0 + } else { + curChild++ + s.skipchild = false + } + continue + } + + // This is an interior node where we've finished analyzing all the children, or + // the end of a leaf node. + s.skipAllChildren = false + + if s.intIsEmpty() { + break + } + + curChild = s.popInt() + curNode = curNode.next + + s.calculateFC(curNode.t|afterChild, curNode, curChild) + if s.failed { + return nil + } + + curChild++ + } + + if s.fcIsEmpty() { + return nil + } + + return s.popFC() +} + +// To avoid recursion, we use a simple integer stack. +// This is the push. +func (s *regexFcd) pushInt(I int) { + if s.intDepth >= len(s.intStack) { + expanded := make([]int, s.intDepth*2) + copy(expanded, s.intStack) + s.intStack = expanded + } + + s.intStack[s.intDepth] = I + s.intDepth++ +} + +// True if the stack is empty. 
+func (s *regexFcd) intIsEmpty() bool { + return s.intDepth == 0 +} + +// This is the pop. +func (s *regexFcd) popInt() int { + s.intDepth-- + return s.intStack[s.intDepth] +} + +// We also use a stack of RegexFC objects. +// This is the push. +func (s *regexFcd) pushFC(fc regexFc) { + if s.fcDepth >= len(s.fcStack) { + expanded := make([]regexFc, s.fcDepth*2) + copy(expanded, s.fcStack) + s.fcStack = expanded + } + + s.fcStack[s.fcDepth] = fc + s.fcDepth++ +} + +// True if the stack is empty. +func (s *regexFcd) fcIsEmpty() bool { + return s.fcDepth == 0 +} + +// This is the pop. +func (s *regexFcd) popFC() *regexFc { + s.fcDepth-- + return &s.fcStack[s.fcDepth] +} + +// This is the top. +func (s *regexFcd) topFC() *regexFc { + return &s.fcStack[s.fcDepth-1] +} + +// Called in Beforechild to prevent further processing of the current child +func (s *regexFcd) skipChild() { + s.skipchild = true +} + +// FC computation and shortcut cases for each node type +func (s *regexFcd) calculateFC(nt nodeType, node *regexNode, CurIndex int) { + //fmt.Printf("NodeType: %v, CurIndex: %v, Desc: %v\n", nt, CurIndex, node.description()) + ci := false + rtl := false + + if nt <= ntRef { + if (node.options & IgnoreCase) != 0 { + ci = true + } + if (node.options & RightToLeft) != 0 { + rtl = true + } + } + + switch nt { + case ntConcatenate | beforeChild, ntAlternate | beforeChild, ntTestref | beforeChild, ntLoop | beforeChild, ntLazyloop | beforeChild: + break + + case ntTestgroup | beforeChild: + if CurIndex == 0 { + s.skipChild() + } + break + + case ntEmpty: + s.pushFC(regexFc{nullable: true}) + break + + case ntConcatenate | afterChild: + if CurIndex != 0 { + child := s.popFC() + cumul := s.topFC() + + s.failed = !cumul.addFC(*child, true) + } + + fc := s.topFC() + if !fc.nullable { + s.skipAllChildren = true + } + break + + case ntTestgroup | afterChild: + if CurIndex > 1 { + child := s.popFC() + cumul := s.topFC() + + s.failed = !cumul.addFC(*child, false) + } + break + + case ntAlternate | afterChild, ntTestref | afterChild: + if CurIndex != 0 { + child := s.popFC() + cumul := s.topFC() + + s.failed = !cumul.addFC(*child, false) + } + break + + case ntLoop | afterChild, ntLazyloop | afterChild: + if node.m == 0 { + fc := s.topFC() + fc.nullable = true + } + break + + case ntGroup | beforeChild, ntGroup | afterChild, ntCapture | beforeChild, ntCapture | afterChild, ntGreedy | beforeChild, ntGreedy | afterChild: + break + + case ntRequire | beforeChild, ntPrevent | beforeChild: + s.skipChild() + s.pushFC(regexFc{nullable: true}) + break + + case ntRequire | afterChild, ntPrevent | afterChild: + break + + case ntOne, ntNotone: + s.pushFC(newRegexFc(node.ch, nt == ntNotone, false, ci)) + break + + case ntOneloop, ntOnelazy: + s.pushFC(newRegexFc(node.ch, false, node.m == 0, ci)) + break + + case ntNotoneloop, ntNotonelazy: + s.pushFC(newRegexFc(node.ch, true, node.m == 0, ci)) + break + + case ntMulti: + if len(node.str) == 0 { + s.pushFC(regexFc{nullable: true}) + } else if !rtl { + s.pushFC(newRegexFc(node.str[0], false, false, ci)) + } else { + s.pushFC(newRegexFc(node.str[len(node.str)-1], false, false, ci)) + } + break + + case ntSet: + s.pushFC(regexFc{cc: node.set.Copy(), nullable: false, caseInsensitive: ci}) + break + + case ntSetloop, ntSetlazy: + s.pushFC(regexFc{cc: node.set.Copy(), nullable: node.m == 0, caseInsensitive: ci}) + break + + case ntRef: + s.pushFC(regexFc{cc: *AnyClass(), nullable: true, caseInsensitive: false}) + break + + case ntNothing, ntBol, ntEol, ntBoundary, ntNonboundary, 
ntECMABoundary, ntNonECMABoundary, ntBeginning, ntStart, ntEndZ, ntEnd: + s.pushFC(regexFc{nullable: true}) + break + + default: + panic(fmt.Sprintf("unexpected op code: %v", nt)) + } +} + +type regexFc struct { + cc CharSet + nullable bool + caseInsensitive bool +} + +func newRegexFc(ch rune, not, nullable, caseInsensitive bool) regexFc { + r := regexFc{ + caseInsensitive: caseInsensitive, + nullable: nullable, + } + if not { + if ch > 0 { + r.cc.addRange('\x00', ch-1) + } + if ch < 0xFFFF { + r.cc.addRange(ch+1, utf8.MaxRune) + } + } else { + r.cc.addRange(ch, ch) + } + return r +} + +func (r *regexFc) getFirstChars() CharSet { + if r.caseInsensitive { + r.cc.addLowercase() + } + + return r.cc +} + +func (r *regexFc) addFC(fc regexFc, concatenate bool) bool { + if !r.cc.IsMergeable() || !fc.cc.IsMergeable() { + return false + } + + if concatenate { + if !r.nullable { + return true + } + + if !fc.nullable { + r.nullable = false + } + } else { + if fc.nullable { + r.nullable = true + } + } + + r.caseInsensitive = r.caseInsensitive || fc.caseInsensitive + r.cc.addSet(fc.cc) + + return true +} + +// This is a related computation: it takes a RegexTree and computes the +// leading substring if it sees one. It's quite trivial and gives up easily. +func getPrefix(tree *RegexTree) *Prefix { + var concatNode *regexNode + nextChild := 0 + + curNode := tree.root + + for { + switch curNode.t { + case ntConcatenate: + if len(curNode.children) > 0 { + concatNode = curNode + nextChild = 0 + } + + case ntGreedy, ntCapture: + curNode = curNode.children[0] + concatNode = nil + continue + + case ntOneloop, ntOnelazy: + if curNode.m > 0 { + return &Prefix{ + PrefixStr: repeat(curNode.ch, curNode.m), + CaseInsensitive: (curNode.options & IgnoreCase) != 0, + } + } + return nil + + case ntOne: + return &Prefix{ + PrefixStr: []rune{curNode.ch}, + CaseInsensitive: (curNode.options & IgnoreCase) != 0, + } + + case ntMulti: + return &Prefix{ + PrefixStr: curNode.str, + CaseInsensitive: (curNode.options & IgnoreCase) != 0, + } + + case ntBol, ntEol, ntBoundary, ntECMABoundary, ntBeginning, ntStart, + ntEndZ, ntEnd, ntEmpty, ntRequire, ntPrevent: + + default: + return nil + } + + if concatNode == nil || nextChild >= len(concatNode.children) { + return nil + } + + curNode = concatNode.children[nextChild] + nextChild++ + } +} + +// repeat the rune r, c times... up to the max of MaxPrefixSize +func repeat(r rune, c int) []rune { + if c > MaxPrefixSize { + c = MaxPrefixSize + } + + ret := make([]rune, c) + + // binary growth using copy for speed + ret[0] = r + bp := 1 + for bp < len(ret) { + copy(ret[bp:], ret[:bp]) + bp *= 2 + } + + return ret +} + +// BmPrefix precomputes the Boyer-Moore +// tables for fast string scanning. These tables allow +// you to scan for the first occurrence of a string within +// a large body of text without examining every character. +// The performance of the heuristic depends on the actual +// string and the text being searched, but usually, the longer +// the string that is being searched for, the fewer characters +// need to be examined. 
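+//
+// For example, when scanning for the pattern "abc", a mismatched text character
+// that does not appear in the pattern at all lets the scan skip ahead by the
+// full pattern length rather than advancing one character at a time.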
+type BmPrefix struct { + positive []int + negativeASCII []int + negativeUnicode [][]int + pattern []rune + lowASCII rune + highASCII rune + rightToLeft bool + caseInsensitive bool +} + +func newBmPrefix(pattern []rune, caseInsensitive, rightToLeft bool) *BmPrefix { + + b := &BmPrefix{ + rightToLeft: rightToLeft, + caseInsensitive: caseInsensitive, + pattern: pattern, + } + + if caseInsensitive { + for i := 0; i < len(b.pattern); i++ { + // We do the ToLower character by character for consistency. With surrogate chars, doing + // a ToLower on the entire string could actually change the surrogate pair. This is more correct + // linguistically, but since Regex doesn't support surrogates, it's more important to be + // consistent. + + b.pattern[i] = unicode.ToLower(b.pattern[i]) + } + } + + var beforefirst, last, bump int + var scan, match int + + if !rightToLeft { + beforefirst = -1 + last = len(b.pattern) - 1 + bump = 1 + } else { + beforefirst = len(b.pattern) + last = 0 + bump = -1 + } + + // PART I - the good-suffix shift table + // + // compute the positive requirement: + // if char "i" is the first one from the right that doesn't match, + // then we know the matcher can advance by _positive[i]. + // + // This algorithm is a simplified variant of the standard + // Boyer-Moore good suffix calculation. + + b.positive = make([]int, len(b.pattern)) + + examine := last + ch := b.pattern[examine] + b.positive[examine] = bump + examine -= bump + +Outerloop: + for { + // find an internal char (examine) that matches the tail + + for { + if examine == beforefirst { + break Outerloop + } + if b.pattern[examine] == ch { + break + } + examine -= bump + } + + match = last + scan = examine + + // find the length of the match + for { + if scan == beforefirst || b.pattern[match] != b.pattern[scan] { + // at the end of the match, note the difference in _positive + // this is not the length of the match, but the distance from the internal match + // to the tail suffix. + if b.positive[match] == 0 { + b.positive[match] = match - scan + } + + // System.Diagnostics.Debug.WriteLine("Set positive[" + match + "] to " + (match - scan)); + + break + } + + scan -= bump + match -= bump + } + + examine -= bump + } + + match = last - bump + + // scan for the chars for which there are no shifts that yield a different candidate + + // The inside of the if statement used to say + // "_positive[match] = last - beforefirst;" + // This is slightly less aggressive in how much we skip, but at worst it + // should mean a little more work rather than skipping a potential match. + for match != beforefirst { + if b.positive[match] == 0 { + b.positive[match] = bump + } + + match -= bump + } + + // PART II - the bad-character shift table + // + // compute the negative requirement: + // if char "ch" is the reject character when testing position "i", + // we can slide up by _negative[ch]; + // (_negative[ch] = str.Length - 1 - str.LastIndexOf(ch)) + // + // the lookup table is divided into ASCII and Unicode portions; + // only those parts of the Unicode 16-bit code set that actually + // appear in the string are in the table. (Maximum size with + // Unicode is 65K; ASCII only case is 512 bytes.) 
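+ //
+ // For a left-to-right pattern "abc" this gives negativeASCII['c'] = 0,
+ // negativeASCII['b'] = 1, negativeASCII['a'] = 2, and the full shift of 3
+ // (last - beforefirst) for every character not in the pattern.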
+ + b.negativeASCII = make([]int, 128) + + for i := 0; i < len(b.negativeASCII); i++ { + b.negativeASCII[i] = last - beforefirst + } + + b.lowASCII = 127 + b.highASCII = 0 + + for examine = last; examine != beforefirst; examine -= bump { + ch = b.pattern[examine] + + switch { + case ch < 128: + if b.lowASCII > ch { + b.lowASCII = ch + } + + if b.highASCII < ch { + b.highASCII = ch + } + + if b.negativeASCII[ch] == last-beforefirst { + b.negativeASCII[ch] = last - examine + } + case ch <= 0xffff: + i, j := ch>>8, ch&0xFF + + if b.negativeUnicode == nil { + b.negativeUnicode = make([][]int, 256) + } + + if b.negativeUnicode[i] == nil { + newarray := make([]int, 256) + + for k := 0; k < len(newarray); k++ { + newarray[k] = last - beforefirst + } + + if i == 0 { + copy(newarray, b.negativeASCII) + //TODO: this line needed? + b.negativeASCII = newarray + } + + b.negativeUnicode[i] = newarray + } + + if b.negativeUnicode[i][j] == last-beforefirst { + b.negativeUnicode[i][j] = last - examine + } + default: + // we can't do the filter because this algo doesn't support + // unicode chars >0xffff + return nil + } + } + + return b +} + +func (b *BmPrefix) String() string { + return string(b.pattern) +} + +// Dump returns the contents of the filter as a human readable string +func (b *BmPrefix) Dump(indent string) string { + buf := &bytes.Buffer{} + + fmt.Fprintf(buf, "%sBM Pattern: %s\n%sPositive: ", indent, string(b.pattern), indent) + for i := 0; i < len(b.positive); i++ { + buf.WriteString(strconv.Itoa(b.positive[i])) + buf.WriteRune(' ') + } + buf.WriteRune('\n') + + if b.negativeASCII != nil { + buf.WriteString(indent) + buf.WriteString("Negative table\n") + for i := 0; i < len(b.negativeASCII); i++ { + if b.negativeASCII[i] != len(b.pattern) { + fmt.Fprintf(buf, "%s %s %s\n", indent, Escape(string(rune(i))), strconv.Itoa(b.negativeASCII[i])) + } + } + } + + return buf.String() +} + +// Scan uses the Boyer-Moore algorithm to find the first occurrence +// of the specified string within text, beginning at index, and +// constrained within beglimit and endlimit. +// +// The direction and case-sensitivity of the match is determined +// by the arguments to the RegexBoyerMoore constructor. 
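+//
+// A minimal left-to-right, case-sensitive use looks like this
+// (illustrative sketch only, not part of the package API):
+//
+//	text := []rune("say the magic word")
+//	bm := newBmPrefix([]rune("magic"), false, false)
+//	pos := bm.Scan(text, 0, 0, len(text)) // pos == 8; -1 when absent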
+func (b *BmPrefix) Scan(text []rune, index, beglimit, endlimit int) int { + var ( + defadv, test, test2 int + match, startmatch, endmatch int + bump, advance int + chTest rune + unicodeLookup []int + ) + + if !b.rightToLeft { + defadv = len(b.pattern) + startmatch = len(b.pattern) - 1 + endmatch = 0 + test = index + defadv - 1 + bump = 1 + } else { + defadv = -len(b.pattern) + startmatch = 0 + endmatch = -defadv - 1 + test = index + defadv + bump = -1 + } + + chMatch := b.pattern[startmatch] + + for { + if test >= endlimit || test < beglimit { + return -1 + } + + chTest = text[test] + + if b.caseInsensitive { + chTest = unicode.ToLower(chTest) + } + + if chTest != chMatch { + if chTest < 128 { + advance = b.negativeASCII[chTest] + } else if chTest < 0xffff && len(b.negativeUnicode) > 0 { + unicodeLookup = b.negativeUnicode[chTest>>8] + if len(unicodeLookup) > 0 { + advance = unicodeLookup[chTest&0xFF] + } else { + advance = defadv + } + } else { + advance = defadv + } + + test += advance + } else { // if (chTest == chMatch) + test2 = test + match = startmatch + + for { + if match == endmatch { + if b.rightToLeft { + return test2 + 1 + } else { + return test2 + } + } + + match -= bump + test2 -= bump + + chTest = text[test2] + + if b.caseInsensitive { + chTest = unicode.ToLower(chTest) + } + + if chTest != b.pattern[match] { + advance = b.positive[match] + if chTest < 128 { + test2 = (match - startmatch) + b.negativeASCII[chTest] + } else if chTest < 0xffff && len(b.negativeUnicode) > 0 { + unicodeLookup = b.negativeUnicode[chTest>>8] + if len(unicodeLookup) > 0 { + test2 = (match - startmatch) + unicodeLookup[chTest&0xFF] + } else { + test += advance + break + } + } else { + test += advance + break + } + + if b.rightToLeft { + if test2 < advance { + advance = test2 + } + } else if test2 > advance { + advance = test2 + } + + test += advance + break + } + } + } + } +} + +// When a regex is anchored, we can do a quick IsMatch test instead of a Scan +func (b *BmPrefix) IsMatch(text []rune, index, beglimit, endlimit int) bool { + if !b.rightToLeft { + if index < beglimit || endlimit-index < len(b.pattern) { + return false + } + + return b.matchPattern(text, index) + } else { + if index > endlimit || index-beglimit < len(b.pattern) { + return false + } + + return b.matchPattern(text, index-len(b.pattern)) + } +} + +func (b *BmPrefix) matchPattern(text []rune, index int) bool { + if len(text)-index < len(b.pattern) { + return false + } + + if b.caseInsensitive { + for i := 0; i < len(b.pattern); i++ { + //Debug.Assert(textinfo.ToLower(_pattern[i]) == _pattern[i], "pattern should be converted to lower case in constructor!"); + if unicode.ToLower(text[index+i]) != b.pattern[i] { + return false + } + } + return true + } else { + for i := 0; i < len(b.pattern); i++ { + if text[index+i] != b.pattern[i] { + return false + } + } + return true + } +} + +type AnchorLoc int16 + +// where the regex can be pegged +const ( + AnchorBeginning AnchorLoc = 0x0001 + AnchorBol = 0x0002 + AnchorStart = 0x0004 + AnchorEol = 0x0008 + AnchorEndZ = 0x0010 + AnchorEnd = 0x0020 + AnchorBoundary = 0x0040 + AnchorECMABoundary = 0x0080 +) + +func getAnchors(tree *RegexTree) AnchorLoc { + + var concatNode *regexNode + nextChild, result := 0, AnchorLoc(0) + + curNode := tree.root + + for { + switch curNode.t { + case ntConcatenate: + if len(curNode.children) > 0 { + concatNode = curNode + nextChild = 0 + } + + case ntGreedy, ntCapture: + curNode = curNode.children[0] + concatNode = nil + continue + + case ntBol, ntEol, 
ntBoundary, ntECMABoundary, ntBeginning, + ntStart, ntEndZ, ntEnd: + return result | anchorFromType(curNode.t) + + case ntEmpty, ntRequire, ntPrevent: + + default: + return result + } + + if concatNode == nil || nextChild >= len(concatNode.children) { + return result + } + + curNode = concatNode.children[nextChild] + nextChild++ + } +} + +func anchorFromType(t nodeType) AnchorLoc { + switch t { + case ntBol: + return AnchorBol + case ntEol: + return AnchorEol + case ntBoundary: + return AnchorBoundary + case ntECMABoundary: + return AnchorECMABoundary + case ntBeginning: + return AnchorBeginning + case ntStart: + return AnchorStart + case ntEndZ: + return AnchorEndZ + case ntEnd: + return AnchorEnd + default: + return 0 + } +} + +// anchorDescription returns a human-readable description of the anchors +func (anchors AnchorLoc) String() string { + buf := &bytes.Buffer{} + + if 0 != (anchors & AnchorBeginning) { + buf.WriteString(", Beginning") + } + if 0 != (anchors & AnchorStart) { + buf.WriteString(", Start") + } + if 0 != (anchors & AnchorBol) { + buf.WriteString(", Bol") + } + if 0 != (anchors & AnchorBoundary) { + buf.WriteString(", Boundary") + } + if 0 != (anchors & AnchorECMABoundary) { + buf.WriteString(", ECMABoundary") + } + if 0 != (anchors & AnchorEol) { + buf.WriteString(", Eol") + } + if 0 != (anchors & AnchorEnd) { + buf.WriteString(", End") + } + if 0 != (anchors & AnchorEndZ) { + buf.WriteString(", EndZ") + } + + // trim off comma + if buf.Len() >= 2 { + return buf.String()[2:] + } + return "None" +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/replacerdata.go b/vendor/github.com/dlclark/regexp2/syntax/replacerdata.go new file mode 100644 index 0000000000..bcf4d3f257 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/replacerdata.go @@ -0,0 +1,87 @@ +package syntax + +import ( + "bytes" + "errors" +) + +type ReplacerData struct { + Rep string + Strings []string + Rules []int +} + +const ( + replaceSpecials = 4 + replaceLeftPortion = -1 + replaceRightPortion = -2 + replaceLastGroup = -3 + replaceWholeString = -4 +) + +//ErrReplacementError is a general error during parsing the replacement text +var ErrReplacementError = errors.New("Replacement pattern error.") + +// NewReplacerData will populate a reusable replacer data struct based on the given replacement string +// and the capture group data from a regexp +func NewReplacerData(rep string, caps map[int]int, capsize int, capnames map[string]int, op RegexOptions) (*ReplacerData, error) { + p := parser{ + options: op, + caps: caps, + capsize: capsize, + capnames: capnames, + } + p.setPattern(rep) + concat, err := p.scanReplacement() + if err != nil { + return nil, err + } + + if concat.t != ntConcatenate { + panic(ErrReplacementError) + } + + sb := &bytes.Buffer{} + var ( + strings []string + rules []int + ) + + for _, child := range concat.children { + switch child.t { + case ntMulti: + child.writeStrToBuf(sb) + + case ntOne: + sb.WriteRune(child.ch) + + case ntRef: + if sb.Len() > 0 { + rules = append(rules, len(strings)) + strings = append(strings, sb.String()) + sb.Reset() + } + slot := child.m + + if len(caps) > 0 && slot >= 0 { + slot = caps[slot] + } + + rules = append(rules, -replaceSpecials-1-slot) + + default: + panic(ErrReplacementError) + } + } + + if sb.Len() > 0 { + rules = append(rules, len(strings)) + strings = append(strings, sb.String()) + } + + return &ReplacerData{ + Rep: rep, + Strings: strings, + Rules: rules, + }, nil +} diff --git 
a/vendor/github.com/dlclark/regexp2/syntax/tree.go b/vendor/github.com/dlclark/regexp2/syntax/tree.go
new file mode 100644
index 0000000000..ea28829319
--- /dev/null
+++ b/vendor/github.com/dlclark/regexp2/syntax/tree.go
@@ -0,0 +1,654 @@
+package syntax
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"strconv"
+)
+
+type RegexTree struct {
+	root       *regexNode
+	caps       map[int]int
+	capnumlist []int
+	captop     int
+	Capnames   map[string]int
+	Caplist    []string
+	options    RegexOptions
+}
+
+// A regular expression is built into a parsed tree of these nodes.
+
+// Implementation notes:
+//
+// Since the node tree is a temporary data structure only used
+// during compilation of the regexp to integer codes, it's
+// designed for clarity and convenience rather than
+// space efficiency.
+//
+// RegexNodes are built into a tree, linked by the n.children list.
+// Each node also has a n.parent and n.ichild member indicating
+// its parent and which child # it is in its parent's list.
+//
+// RegexNodes come in as many types as there are constructs in
+// a regular expression, for example, "concatenate", "alternate",
+// "one", "rept", "group". There are also node types for basic
+// peephole optimizations, e.g., "onerep", "notsetrep", etc.
+//
+// Because perl 5 allows "lookback" groups that scan backwards,
+// each node also gets a "direction". Normally the value of
+// boolean n.backward = false.
+//
+// During parsing, top-level nodes are also stacked onto a parse
+// stack (a stack of trees). For this purpose we have a n.next
+// pointer. [Note that to save a few bytes, we could overload the
+// n.parent pointer instead.]
+//
+// On the parse stack, each tree has a "role" - basically, the
+// nonterminal in the grammar that the parser has currently
+// assigned to the tree. That code is stored in n.role.
+//
+// Finally, some of the different kinds of nodes have data.
+// Two integers (for the looping constructs) are stored in
+// n.operands, and an object (either a string or a set)
+// is stored in n.data
+type regexNode struct {
+	t        nodeType
+	children []*regexNode
+	str      []rune
+	set      *CharSet
+	ch       rune
+	m        int
+	n        int
+	options  RegexOptions
+	next     *regexNode
+}
+
+type nodeType int32
+
+const (
+	// The following are leaves, and correspond to primitive operations
+
+	ntOnerep nodeType = 0 // lef,back char,min,max a {n}
+	ntNotonerep       = 1 // lef,back char,min,max .{n}
+	ntSetrep          = 2 // lef,back set,min,max [\d]{n}
+	ntOneloop         = 3 // lef,back char,min,max a {,n}
+	ntNotoneloop      = 4 // lef,back char,min,max .{,n}
+	ntSetloop         = 5 // lef,back set,min,max [\d]{,n}
+	ntOnelazy         = 6 // lef,back char,min,max a {,n}?
+	ntNotonelazy      = 7 // lef,back char,min,max .{,n}?
+	ntSetlazy         = 8 // lef,back set,min,max [\d]{,n}?
+	ntOne             = 9 // lef char a
+	ntNotone          = 10 // lef char [^a]
+	ntSet             = 11 // lef set [a-z\s] \w \s \d
+	ntMulti           = 12 // lef string abcd
+	ntRef             = 13 // lef group \#
+	ntBol             = 14 // ^
+	ntEol             = 15 // $
+	ntBoundary        = 16 // \b
+	ntNonboundary     = 17 // \B
+	ntBeginning       = 18 // \A
+	ntStart           = 19 // \G
+	ntEndZ            = 20 // \Z
+	ntEnd             = 21 // \z
+
+	// Interior nodes do not correspond to primitive operations, but
+	// control structures compositing other operations
+
+	// Concat and alternate take n children, and can run forward or backwards
+
+	ntNothing     = 22 // []
+	ntEmpty       = 23 // ()
+	ntAlternate   = 24 // a|b
+	ntConcatenate = 25 // ab
+	ntLoop        = 26 // m,x * + ? {,}
+	ntLazyloop    = 27 // m,x *? +? ?? {,}?
+	ntCapture   = 28 // n ()
+	ntGroup     = 29 // (?:)
+	ntRequire   = 30 // (?=) (?<=)
+	ntPrevent   = 31 // (?!) (?<!)
+	ntGreedy    = 32 // (?>) (?<)
+	ntTestref   = 33 // (?(n) | )
+	ntTestgroup = 34 // (?(...) | )
+
+	ntECMABoundary    = 41 // \b
+	ntNonECMABoundary = 42 // \B
+)
+
+func newRegexNode(t nodeType, opt RegexOptions) *regexNode {
+	return &regexNode{
+		t:       t,
+		options: opt,
+	}
+}
+
+func newRegexNodeCh(t nodeType, opt RegexOptions, ch rune) *regexNode {
+	return &regexNode{
+		t:       t,
+		options: opt,
+		ch:      ch,
+	}
+}
+
+func newRegexNodeStr(t nodeType, opt RegexOptions, str []rune) *regexNode {
+	return &regexNode{
+		t:       t,
+		options: opt,
+		str:     str,
+	}
+}
+
+func newRegexNodeSet(t nodeType, opt RegexOptions, set *CharSet) *regexNode {
+	return &regexNode{
+		t:       t,
+		options: opt,
+		set:     set,
+	}
+}
+
+func newRegexNodeM(t nodeType, opt RegexOptions, m int) *regexNode {
+	return &regexNode{
+		t:       t,
+		options: opt,
+		m:       m,
+	}
+}
+func newRegexNodeMN(t nodeType, opt RegexOptions, m, n int) *regexNode {
+	return &regexNode{
+		t:       t,
+		options: opt,
+		m:       m,
+		n:       n,
+	}
+}
+
+func (n *regexNode) writeStrToBuf(buf *bytes.Buffer) {
+	for i := 0; i < len(n.str); i++ {
+		buf.WriteRune(n.str[i])
+	}
+}
+
+func (n *regexNode) addChild(child *regexNode) {
+	reduced := child.reduce()
+	n.children = append(n.children, reduced)
+	reduced.next = n
+}
+
+func (n *regexNode) insertChildren(afterIndex int, nodes []*regexNode) {
+	newChildren := make([]*regexNode, 0, len(n.children)+len(nodes))
+	n.children = append(append(append(newChildren, n.children[:afterIndex]...), nodes...), n.children[afterIndex:]...)
+}
+
+// removes children including the start but not the end index
+func (n *regexNode) removeChildren(startIndex, endIndex int) {
+	n.children = append(n.children[:startIndex], n.children[endIndex:]...)
+}
+
+// Pass type as OneLazy or OneLoop
+func (n *regexNode) makeRep(t nodeType, min, max int) {
+	n.t += (t - ntOne)
+	n.m = min
+	n.n = max
+}
+
+func (n *regexNode) reduce() *regexNode {
+	switch n.t {
+	case ntAlternate:
+		return n.reduceAlternation()
+
+	case ntConcatenate:
+		return n.reduceConcatenation()
+
+	case ntLoop, ntLazyloop:
+		return n.reduceRep()
+
+	case ntGroup:
+		return n.reduceGroup()
+
+	case ntSet, ntSetloop:
+		return n.reduceSet()
+
+	default:
+		return n
+	}
+}
+
+// Basic optimization. Single-letter alternations can be replaced
+// by faster set specifications, and nested alternations with no
+// intervening operators can be flattened:
+//
+// a|b|c|def|g|h -> [a-c]|def|[gh]
+// apple|(?:orange|pear)|grape -> apple|orange|pear|grape
+func (n *regexNode) reduceAlternation() *regexNode {
+	if len(n.children) == 0 {
+		return newRegexNode(ntNothing, n.options)
+	}
+
+	wasLastSet := false
+	lastNodeCannotMerge := false
+	var optionsLast RegexOptions
+	var i, j int
+
+	for i, j = 0, 0; i < len(n.children); i, j = i+1, j+1 {
+		at := n.children[i]
+
+		if j < i {
+			n.children[j] = at
+		}
+
+		for {
+			if at.t == ntAlternate {
+				for k := 0; k < len(at.children); k++ {
+					at.children[k].next = n
+				}
+				n.insertChildren(i+1, at.children)
+
+				j--
+			} else if at.t == ntSet || at.t == ntOne {
+				// Cannot merge sets if L or I options differ, or if either are negated.
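+				// (A naive union would be wrong for negated classes:
+				// [^a]|[^b] matches every character, which is not the
+				// set [^ab], hence the IsMergeable guards on this merge.)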
+ optionsAt := at.options & (RightToLeft | IgnoreCase) + + if at.t == ntSet { + if !wasLastSet || optionsLast != optionsAt || lastNodeCannotMerge || !at.set.IsMergeable() { + wasLastSet = true + lastNodeCannotMerge = !at.set.IsMergeable() + optionsLast = optionsAt + break + } + } else if !wasLastSet || optionsLast != optionsAt || lastNodeCannotMerge { + wasLastSet = true + lastNodeCannotMerge = false + optionsLast = optionsAt + break + } + + // The last node was a Set or a One, we're a Set or One and our options are the same. + // Merge the two nodes. + j-- + prev := n.children[j] + + var prevCharClass *CharSet + if prev.t == ntOne { + prevCharClass = &CharSet{} + prevCharClass.addChar(prev.ch) + } else { + prevCharClass = prev.set + } + + if at.t == ntOne { + prevCharClass.addChar(at.ch) + } else { + prevCharClass.addSet(*at.set) + } + + prev.t = ntSet + prev.set = prevCharClass + } else if at.t == ntNothing { + j-- + } else { + wasLastSet = false + lastNodeCannotMerge = false + } + break + } + } + + if j < i { + n.removeChildren(j, i) + } + + return n.stripEnation(ntNothing) +} + +// Basic optimization. Adjacent strings can be concatenated. +// +// (?:abc)(?:def) -> abcdef +func (n *regexNode) reduceConcatenation() *regexNode { + // Eliminate empties and concat adjacent strings/chars + + var optionsLast RegexOptions + var optionsAt RegexOptions + var i, j int + + if len(n.children) == 0 { + return newRegexNode(ntEmpty, n.options) + } + + wasLastString := false + + for i, j = 0, 0; i < len(n.children); i, j = i+1, j+1 { + var at, prev *regexNode + + at = n.children[i] + + if j < i { + n.children[j] = at + } + + if at.t == ntConcatenate && + ((at.options & RightToLeft) == (n.options & RightToLeft)) { + for k := 0; k < len(at.children); k++ { + at.children[k].next = n + } + + //insert at.children at i+1 index in n.children + n.insertChildren(i+1, at.children) + + j-- + } else if at.t == ntMulti || at.t == ntOne { + // Cannot merge strings if L or I options differ + optionsAt = at.options & (RightToLeft | IgnoreCase) + + if !wasLastString || optionsLast != optionsAt { + wasLastString = true + optionsLast = optionsAt + continue + } + + j-- + prev = n.children[j] + + if prev.t == ntOne { + prev.t = ntMulti + prev.str = []rune{prev.ch} + } + + if (optionsAt & RightToLeft) == 0 { + if at.t == ntOne { + prev.str = append(prev.str, at.ch) + } else { + prev.str = append(prev.str, at.str...) 
+ } + } else { + if at.t == ntOne { + // insert at the front by expanding our slice, copying the data over, and then setting the value + prev.str = append(prev.str, 0) + copy(prev.str[1:], prev.str) + prev.str[0] = at.ch + } else { + //insert at the front...this one we'll make a new slice and copy both into it + merge := make([]rune, len(prev.str)+len(at.str)) + copy(merge, at.str) + copy(merge[len(at.str):], prev.str) + prev.str = merge + } + } + } else if at.t == ntEmpty { + j-- + } else { + wasLastString = false + } + } + + if j < i { + // remove indices j through i from the children + n.removeChildren(j, i) + } + + return n.stripEnation(ntEmpty) +} + +// Nested repeaters just get multiplied with each other if they're not +// too lumpy +func (n *regexNode) reduceRep() *regexNode { + + u := n + t := n.t + min := n.m + max := n.n + + for { + if len(u.children) == 0 { + break + } + + child := u.children[0] + + // multiply reps of the same type only + if child.t != t { + childType := child.t + + if !(childType >= ntOneloop && childType <= ntSetloop && t == ntLoop || + childType >= ntOnelazy && childType <= ntSetlazy && t == ntLazyloop) { + break + } + } + + // child can be too lumpy to blur, e.g., (a {100,105}) {3} or (a {2,})? + // [but things like (a {2,})+ are not too lumpy...] + if u.m == 0 && child.m > 1 || child.n < child.m*2 { + break + } + + u = child + if u.m > 0 { + if (math.MaxInt32-1)/u.m < min { + u.m = math.MaxInt32 + } else { + u.m = u.m * min + } + } + if u.n > 0 { + if (math.MaxInt32-1)/u.n < max { + u.n = math.MaxInt32 + } else { + u.n = u.n * max + } + } + } + + if math.MaxInt32 == min { + return newRegexNode(ntNothing, n.options) + } + return u + +} + +// Simple optimization. If a concatenation or alternation has only +// one child strip out the intermediate node. If it has zero children, +// turn it into an empty. +func (n *regexNode) stripEnation(emptyType nodeType) *regexNode { + switch len(n.children) { + case 0: + return newRegexNode(emptyType, n.options) + case 1: + return n.children[0] + default: + return n + } +} + +func (n *regexNode) reduceGroup() *regexNode { + u := n + + for u.t == ntGroup { + u = u.children[0] + } + + return u +} + +// Simple optimization. If a set is a singleton, an inverse singleton, +// or empty, it's transformed accordingly. 
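+// For example, [a] reduces to an ntOne 'a' node and [^a] to an
+// ntNotone 'a' node, so later stages can use the cheaper
+// single-character opcodes instead of a full set membership test.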
+func (n *regexNode) reduceSet() *regexNode {
+	// Extract empty-set, one and not-one case as special
+
+	if n.set == nil {
+		n.t = ntNothing
+	} else if n.set.IsSingleton() {
+		n.ch = n.set.SingletonChar()
+		n.set = nil
+		n.t += (ntOne - ntSet)
+	} else if n.set.IsSingletonInverse() {
+		n.ch = n.set.SingletonChar()
+		n.set = nil
+		n.t += (ntNotone - ntSet)
+	}
+
+	return n
+}
+
+func (n *regexNode) reverseLeft() *regexNode {
+	if n.options&RightToLeft != 0 && n.t == ntConcatenate && len(n.children) > 0 {
+		//reverse children order
+		for left, right := 0, len(n.children)-1; left < right; left, right = left+1, right-1 {
+			n.children[left], n.children[right] = n.children[right], n.children[left]
+		}
+	}
+
+	return n
+}
+
+func (n *regexNode) makeQuantifier(lazy bool, min, max int) *regexNode {
+	if min == 0 && max == 0 {
+		return newRegexNode(ntEmpty, n.options)
+	}
+
+	if min == 1 && max == 1 {
+		return n
+	}
+
+	switch n.t {
+	case ntOne, ntNotone, ntSet:
+		if lazy {
+			n.makeRep(ntOnelazy, min, max)
+		} else {
+			n.makeRep(ntOneloop, min, max)
+		}
+		return n
+
+	default:
+		var t nodeType
+		if lazy {
+			t = ntLazyloop
+		} else {
+			t = ntLoop
+		}
+		result := newRegexNodeMN(t, n.options, min, max)
+		result.addChild(n)
+		return result
+	}
+}
+
+// debug functions
+
+var typeStr = []string{
+	"Onerep", "Notonerep", "Setrep",
+	"Oneloop", "Notoneloop", "Setloop",
+	"Onelazy", "Notonelazy", "Setlazy",
+	"One", "Notone", "Set",
+	"Multi", "Ref",
+	"Bol", "Eol", "Boundary", "Nonboundary",
+	"Beginning", "Start", "EndZ", "End",
+	"Nothing", "Empty",
+	"Alternate", "Concatenate",
+	"Loop", "Lazyloop",
+	"Capture", "Group", "Require", "Prevent", "Greedy",
+	"Testref", "Testgroup",
+	"Unknown", "Unknown", "Unknown",
+	"Unknown", "Unknown", "Unknown",
+	"ECMABoundary", "NonECMABoundary",
+}
+
+func (n *regexNode) description() string {
+	buf := &bytes.Buffer{}
+
+	buf.WriteString(typeStr[n.t])
+
+	if (n.options & ExplicitCapture) != 0 {
+		buf.WriteString("-C")
+	}
+	if (n.options & IgnoreCase) != 0 {
+		buf.WriteString("-I")
+	}
+	if (n.options & RightToLeft) != 0 {
+		buf.WriteString("-L")
+	}
+	if (n.options & Multiline) != 0 {
+		buf.WriteString("-M")
+	}
+	if (n.options & Singleline) != 0 {
+		buf.WriteString("-S")
+	}
+	if (n.options & IgnorePatternWhitespace) != 0 {
+		buf.WriteString("-X")
+	}
+	if (n.options & ECMAScript) != 0 {
+		buf.WriteString("-E")
+	}
+
+	switch n.t {
+	case ntOneloop, ntNotoneloop, ntOnelazy, ntNotonelazy, ntOne, ntNotone:
+		buf.WriteString("(Ch = " + CharDescription(n.ch) + ")")
+		break
+	case ntCapture:
+		buf.WriteString("(index = " + strconv.Itoa(n.m) + ", unindex = " + strconv.Itoa(n.n) + ")")
+		break
+	case ntRef, ntTestref:
+		buf.WriteString("(index = " + strconv.Itoa(n.m) + ")")
+		break
+	case ntMulti:
+		fmt.Fprintf(buf, "(String = %s)", string(n.str))
+		break
+	case ntSet, ntSetloop, ntSetlazy:
+		buf.WriteString("(Set = " + n.set.String() + ")")
+		break
+	}
+
+	switch n.t {
+	case ntOneloop, ntNotoneloop, ntOnelazy, ntNotonelazy, ntSetloop, ntSetlazy, ntLoop, ntLazyloop:
+		buf.WriteString("(Min = ")
+		buf.WriteString(strconv.Itoa(n.m))
+		buf.WriteString(", Max = ")
+		if n.n == math.MaxInt32 {
+			buf.WriteString("inf")
+		} else {
+			buf.WriteString(strconv.Itoa(n.n))
+		}
+		buf.WriteString(")")
+
+		break
+	}
+
+	return buf.String()
+}
+
+var padSpace = []byte("                                ")
+
+func (t *RegexTree) Dump() string {
+	return t.root.dump()
+}
+
+func (n *regexNode) dump() string {
+	var stack []int
+	CurNode := n
+	CurChild := 0
+
+	buf := bytes.NewBufferString(CurNode.description())
+	buf.WriteRune('\n')
+
+	for {
+		if CurNode.children != nil && CurChild < len(CurNode.children) {
+			stack = append(stack, CurChild+1)
+			CurNode = CurNode.children[CurChild]
+			CurChild = 0
+
+			Depth := len(stack)
+			if Depth > 32 {
+				Depth = 32
+			}
+			buf.Write(padSpace[:Depth])
+			buf.WriteString(CurNode.description())
+			buf.WriteRune('\n')
+		} else {
+			if len(stack) == 0 {
+				break
+			}
+
+			CurChild = stack[len(stack)-1]
+			stack = stack[:len(stack)-1]
+			CurNode = CurNode.next
+		}
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/dlclark/regexp2/syntax/writer.go b/vendor/github.com/dlclark/regexp2/syntax/writer.go
new file mode 100644
index 0000000000..a5aa11ca06
--- /dev/null
+++ b/vendor/github.com/dlclark/regexp2/syntax/writer.go
@@ -0,0 +1,500 @@
+package syntax
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"os"
+)
+
+func Write(tree *RegexTree) (*Code, error) {
+	w := writer{
+		intStack:   make([]int, 0, 32),
+		emitted:    make([]int, 2),
+		stringhash: make(map[string]int),
+		sethash:    make(map[string]int),
+	}
+
+	code, err := w.codeFromTree(tree)
+
+	if tree.options&Debug > 0 && code != nil {
+		os.Stdout.WriteString(code.Dump())
+		os.Stdout.WriteString("\n")
+	}
+
+	return code, err
+}
+
+type writer struct {
+	emitted []int
+
+	intStack    []int
+	curpos      int
+	stringhash  map[string]int
+	stringtable [][]rune
+	sethash     map[string]int
+	settable    []*CharSet
+	counting    bool
+	count       int
+	trackcount  int
+	caps        map[int]int
+}
+
+const (
+	beforeChild nodeType = 64
+	afterChild           = 128
+	//MaxPrefixSize is the largest number of runes we'll use for a Boyer-Moore prefix
+	MaxPrefixSize = 50
+)
+
+// The top level RegexCode generator. It does a depth-first walk
+// through the tree and calls EmitFragment to emit code before
+// and after each child of an interior node, and at each leaf.
+//
+// It runs two passes, first to count the size of the generated
+// code, and second to generate the code.
+//
+// We should time it against the alternative, which is
+// to just generate the code and grow the array as we go.
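+//
+// Concretely, the first pass runs with w.counting set and only advances
+// w.count (plus w.trackcount for backtracking opcodes); the second pass
+// then allocates emitted once, as make([]int, w.count), and writes the
+// opcodes into it.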
+func (w *writer) codeFromTree(tree *RegexTree) (*Code, error) {
+	var (
+		curNode  *regexNode
+		curChild int
+		capsize  int
+	)
+	// construct sparse capnum mapping if some numbers are unused
+
+	if tree.capnumlist == nil || tree.captop == len(tree.capnumlist) {
+		capsize = tree.captop
+		w.caps = nil
+	} else {
+		capsize = len(tree.capnumlist)
+		w.caps = tree.caps
+		for i := 0; i < len(tree.capnumlist); i++ {
+			w.caps[tree.capnumlist[i]] = i
+		}
+	}
+
+	w.counting = true
+
+	for {
+		if !w.counting {
+			w.emitted = make([]int, w.count)
+		}
+
+		curNode = tree.root
+		curChild = 0
+
+		w.emit1(Lazybranch, 0)
+
+		for {
+			if len(curNode.children) == 0 {
+				w.emitFragment(curNode.t, curNode, 0)
+			} else if curChild < len(curNode.children) {
+				w.emitFragment(curNode.t|beforeChild, curNode, curChild)
+
+				curNode = curNode.children[curChild]
+
+				w.pushInt(curChild)
+				curChild = 0
+				continue
+			}
+
+			if w.emptyStack() {
+				break
+			}
+
+			curChild = w.popInt()
+			curNode = curNode.next
+
+			w.emitFragment(curNode.t|afterChild, curNode, curChild)
+			curChild++
+		}
+
+		w.patchJump(0, w.curPos())
+		w.emit(Stop)
+
+		if !w.counting {
+			break
+		}
+
+		w.counting = false
+	}
+
+	fcPrefix := getFirstCharsPrefix(tree)
+	prefix := getPrefix(tree)
+	rtl := (tree.options & RightToLeft) != 0
+
+	var bmPrefix *BmPrefix
+	//TODO: benchmark string prefixes
+	if prefix != nil && len(prefix.PrefixStr) > 0 && MaxPrefixSize > 0 {
+		if len(prefix.PrefixStr) > MaxPrefixSize {
+			// limit the prefix to MaxPrefixSize runes
+			prefix.PrefixStr = prefix.PrefixStr[:MaxPrefixSize]
+		}
+		bmPrefix = newBmPrefix(prefix.PrefixStr, prefix.CaseInsensitive, rtl)
+	} else {
+		bmPrefix = nil
+	}
+
+	return &Code{
+		Codes:       w.emitted,
+		Strings:     w.stringtable,
+		Sets:        w.settable,
+		TrackCount:  w.trackcount,
+		Caps:        w.caps,
+		Capsize:     capsize,
+		FcPrefix:    fcPrefix,
+		BmPrefix:    bmPrefix,
+		Anchors:     getAnchors(tree),
+		RightToLeft: rtl,
+	}, nil
+}
+
+// The main RegexCode generator. It does a depth-first walk
+// through the tree and calls EmitFragment to emit code before
+// and after each child of an interior node, and at each leaf.
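+//
+// Interior nodes are visited more than once: the node type arrives with
+// the beforeChild bit (64) or'd in ahead of each child and with the
+// afterChild bit (128) on the way back up, which is how alternations and
+// loops patch their branch targets once the child code length is known.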
+func (w *writer) emitFragment(nodetype nodeType, node *regexNode, curIndex int) error { + bits := InstOp(0) + + if nodetype <= ntRef { + if (node.options & RightToLeft) != 0 { + bits |= Rtl + } + if (node.options & IgnoreCase) != 0 { + bits |= Ci + } + } + ntBits := nodeType(bits) + + switch nodetype { + case ntConcatenate | beforeChild, ntConcatenate | afterChild, ntEmpty: + break + + case ntAlternate | beforeChild: + if curIndex < len(node.children)-1 { + w.pushInt(w.curPos()) + w.emit1(Lazybranch, 0) + } + + case ntAlternate | afterChild: + if curIndex < len(node.children)-1 { + lbPos := w.popInt() + w.pushInt(w.curPos()) + w.emit1(Goto, 0) + w.patchJump(lbPos, w.curPos()) + } else { + for i := 0; i < curIndex; i++ { + w.patchJump(w.popInt(), w.curPos()) + } + } + break + + case ntTestref | beforeChild: + if curIndex == 0 { + w.emit(Setjump) + w.pushInt(w.curPos()) + w.emit1(Lazybranch, 0) + w.emit1(Testref, w.mapCapnum(node.m)) + w.emit(Forejump) + } + + case ntTestref | afterChild: + if curIndex == 0 { + branchpos := w.popInt() + w.pushInt(w.curPos()) + w.emit1(Goto, 0) + w.patchJump(branchpos, w.curPos()) + w.emit(Forejump) + if len(node.children) <= 1 { + w.patchJump(w.popInt(), w.curPos()) + } + } else if curIndex == 1 { + w.patchJump(w.popInt(), w.curPos()) + } + + case ntTestgroup | beforeChild: + if curIndex == 0 { + w.emit(Setjump) + w.emit(Setmark) + w.pushInt(w.curPos()) + w.emit1(Lazybranch, 0) + } + + case ntTestgroup | afterChild: + if curIndex == 0 { + w.emit(Getmark) + w.emit(Forejump) + } else if curIndex == 1 { + Branchpos := w.popInt() + w.pushInt(w.curPos()) + w.emit1(Goto, 0) + w.patchJump(Branchpos, w.curPos()) + w.emit(Getmark) + w.emit(Forejump) + if len(node.children) <= 2 { + w.patchJump(w.popInt(), w.curPos()) + } + } else if curIndex == 2 { + w.patchJump(w.popInt(), w.curPos()) + } + + case ntLoop | beforeChild, ntLazyloop | beforeChild: + + if node.n < math.MaxInt32 || node.m > 1 { + if node.m == 0 { + w.emit1(Nullcount, 0) + } else { + w.emit1(Setcount, 1-node.m) + } + } else if node.m == 0 { + w.emit(Nullmark) + } else { + w.emit(Setmark) + } + + if node.m == 0 { + w.pushInt(w.curPos()) + w.emit1(Goto, 0) + } + w.pushInt(w.curPos()) + + case ntLoop | afterChild, ntLazyloop | afterChild: + + startJumpPos := w.curPos() + lazy := (nodetype - (ntLoop | afterChild)) + + if node.n < math.MaxInt32 || node.m > 1 { + if node.n == math.MaxInt32 { + w.emit2(InstOp(Branchcount+lazy), w.popInt(), math.MaxInt32) + } else { + w.emit2(InstOp(Branchcount+lazy), w.popInt(), node.n-node.m) + } + } else { + w.emit1(InstOp(Branchmark+lazy), w.popInt()) + } + + if node.m == 0 { + w.patchJump(w.popInt(), startJumpPos) + } + + case ntGroup | beforeChild, ntGroup | afterChild: + + case ntCapture | beforeChild: + w.emit(Setmark) + + case ntCapture | afterChild: + w.emit2(Capturemark, w.mapCapnum(node.m), w.mapCapnum(node.n)) + + case ntRequire | beforeChild: + // NOTE: the following line causes lookahead/lookbehind to be + // NON-BACKTRACKING. It can be commented out with (*) + w.emit(Setjump) + + w.emit(Setmark) + + case ntRequire | afterChild: + w.emit(Getmark) + + // NOTE: the following line causes lookahead/lookbehind to be + // NON-BACKTRACKING. 
It can be commented out with (*) + w.emit(Forejump) + + case ntPrevent | beforeChild: + w.emit(Setjump) + w.pushInt(w.curPos()) + w.emit1(Lazybranch, 0) + + case ntPrevent | afterChild: + w.emit(Backjump) + w.patchJump(w.popInt(), w.curPos()) + w.emit(Forejump) + + case ntGreedy | beforeChild: + w.emit(Setjump) + + case ntGreedy | afterChild: + w.emit(Forejump) + + case ntOne, ntNotone: + w.emit1(InstOp(node.t|ntBits), int(node.ch)) + + case ntNotoneloop, ntNotonelazy, ntOneloop, ntOnelazy: + if node.m > 0 { + if node.t == ntOneloop || node.t == ntOnelazy { + w.emit2(Onerep|bits, int(node.ch), node.m) + } else { + w.emit2(Notonerep|bits, int(node.ch), node.m) + } + } + if node.n > node.m { + if node.n == math.MaxInt32 { + w.emit2(InstOp(node.t|ntBits), int(node.ch), math.MaxInt32) + } else { + w.emit2(InstOp(node.t|ntBits), int(node.ch), node.n-node.m) + } + } + + case ntSetloop, ntSetlazy: + if node.m > 0 { + w.emit2(Setrep|bits, w.setCode(node.set), node.m) + } + if node.n > node.m { + if node.n == math.MaxInt32 { + w.emit2(InstOp(node.t|ntBits), w.setCode(node.set), math.MaxInt32) + } else { + w.emit2(InstOp(node.t|ntBits), w.setCode(node.set), node.n-node.m) + } + } + + case ntMulti: + w.emit1(InstOp(node.t|ntBits), w.stringCode(node.str)) + + case ntSet: + w.emit1(InstOp(node.t|ntBits), w.setCode(node.set)) + + case ntRef: + w.emit1(InstOp(node.t|ntBits), w.mapCapnum(node.m)) + + case ntNothing, ntBol, ntEol, ntBoundary, ntNonboundary, ntECMABoundary, ntNonECMABoundary, ntBeginning, ntStart, ntEndZ, ntEnd: + w.emit(InstOp(node.t)) + + default: + return fmt.Errorf("unexpected opcode in regular expression generation: %v", nodetype) + } + + return nil +} + +// To avoid recursion, we use a simple integer stack. +// This is the push. +func (w *writer) pushInt(i int) { + w.intStack = append(w.intStack, i) +} + +// Returns true if the stack is empty. +func (w *writer) emptyStack() bool { + return len(w.intStack) == 0 +} + +// This is the pop. +func (w *writer) popInt() int { + //get our item + idx := len(w.intStack) - 1 + i := w.intStack[idx] + //trim our slice + w.intStack = w.intStack[:idx] + return i +} + +// Returns the current position in the emitted code. +func (w *writer) curPos() int { + return w.curpos +} + +// Fixes up a jump instruction at the specified offset +// so that it jumps to the specified jumpDest. +func (w *writer) patchJump(offset, jumpDest int) { + w.emitted[offset+1] = jumpDest +} + +// Returns an index in the set table for a charset +// uses a map to eliminate duplicates. +func (w *writer) setCode(set *CharSet) int { + if w.counting { + return 0 + } + + buf := &bytes.Buffer{} + + set.mapHashFill(buf) + hash := buf.String() + i, ok := w.sethash[hash] + if !ok { + i = len(w.sethash) + w.sethash[hash] = i + w.settable = append(w.settable, set) + } + return i +} + +// Returns an index in the string table for a string. +// uses a map to eliminate duplicates. +func (w *writer) stringCode(str []rune) int { + if w.counting { + return 0 + } + + hash := string(str) + i, ok := w.stringhash[hash] + if !ok { + i = len(w.stringhash) + w.stringhash[hash] = i + w.stringtable = append(w.stringtable, str) + } + + return i +} + +// When generating code on a regex that uses a sparse set +// of capture slots, we hash them to a dense set of indices +// for an array of capture slots. Instead of doing the hash +// at match time, it's done at compile time, here. 
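+//
+// For example, if a pattern's only capture slots end up numbered 1 and 12
+// (possible with explicitly numbered groups such as (?<12>...)), caps is
+// {1: 0, 12: 1} and Capturemark operands reference the dense slots 0 and 1
+// instead of a 13-entry capture array.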
+func (w *writer) mapCapnum(capnum int) int { + if capnum == -1 { + return -1 + } + + if w.caps != nil { + return w.caps[capnum] + } + + return capnum +} + +// Emits a zero-argument operation. Note that the emit +// functions all run in two modes: they can emit code, or +// they can just count the size of the code. +func (w *writer) emit(op InstOp) { + if w.counting { + w.count++ + if opcodeBacktracks(op) { + w.trackcount++ + } + return + } + w.emitted[w.curpos] = int(op) + w.curpos++ +} + +// Emits a one-argument operation. +func (w *writer) emit1(op InstOp, opd1 int) { + if w.counting { + w.count += 2 + if opcodeBacktracks(op) { + w.trackcount++ + } + return + } + w.emitted[w.curpos] = int(op) + w.curpos++ + w.emitted[w.curpos] = opd1 + w.curpos++ +} + +// Emits a two-argument operation. +func (w *writer) emit2(op InstOp, opd1, opd2 int) { + if w.counting { + w.count += 3 + if opcodeBacktracks(op) { + w.trackcount++ + } + return + } + w.emitted[w.curpos] = int(op) + w.curpos++ + w.emitted[w.curpos] = opd1 + w.curpos++ + w.emitted[w.curpos] = opd2 + w.curpos++ +} diff --git a/vendor/github.com/dlclark/regexp2/testoutput1 b/vendor/github.com/dlclark/regexp2/testoutput1 new file mode 100644 index 0000000000..fbf63fdf2f --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/testoutput1 @@ -0,0 +1,7061 @@ +# This set of tests is for features that are compatible with all versions of +# Perl >= 5.10, in non-UTF mode. It should run clean for the 8-bit, 16-bit, and +# 32-bit PCRE libraries, and also using the perltest.pl script. + +#forbid_utf +#newline_default lf any anycrlf +#perltest + +/the quick brown fox/ + the quick brown fox + 0: the quick brown fox + What do you know about the quick brown fox? + 0: the quick brown fox +\= Expect no match + The quick brown FOX +No match + What do you know about THE QUICK BROWN FOX? +No match + +/The quick brown fox/i + the quick brown fox + 0: the quick brown fox + The quick brown FOX + 0: The quick brown FOX + What do you know about the quick brown fox? + 0: the quick brown fox + What do you know about THE QUICK BROWN FOX? 
+ 0: THE QUICK BROWN FOX + +/abcd\t\n\r\f\a\e\071\x3b\$\\\?caxyz/ + abcd\t\n\r\f\a\e9;\$\\?caxyz + 0: abcd\x09\x0a\x0d\x0c\x07\x1b9;$\?caxyz + +/a*abc?xyz+pqr{3}ab{2,}xy{4,5}pq{0,6}AB{0,}zz/ + abxyzpqrrrabbxyyyypqAzz + 0: abxyzpqrrrabbxyyyypqAzz + abxyzpqrrrabbxyyyypqAzz + 0: abxyzpqrrrabbxyyyypqAzz + aabxyzpqrrrabbxyyyypqAzz + 0: aabxyzpqrrrabbxyyyypqAzz + aaabxyzpqrrrabbxyyyypqAzz + 0: aaabxyzpqrrrabbxyyyypqAzz + aaaabxyzpqrrrabbxyyyypqAzz + 0: aaaabxyzpqrrrabbxyyyypqAzz + abcxyzpqrrrabbxyyyypqAzz + 0: abcxyzpqrrrabbxyyyypqAzz + aabcxyzpqrrrabbxyyyypqAzz + 0: aabcxyzpqrrrabbxyyyypqAzz + aaabcxyzpqrrrabbxyyyypAzz + 0: aaabcxyzpqrrrabbxyyyypAzz + aaabcxyzpqrrrabbxyyyypqAzz + 0: aaabcxyzpqrrrabbxyyyypqAzz + aaabcxyzpqrrrabbxyyyypqqAzz + 0: aaabcxyzpqrrrabbxyyyypqqAzz + aaabcxyzpqrrrabbxyyyypqqqAzz + 0: aaabcxyzpqrrrabbxyyyypqqqAzz + aaabcxyzpqrrrabbxyyyypqqqqAzz + 0: aaabcxyzpqrrrabbxyyyypqqqqAzz + aaabcxyzpqrrrabbxyyyypqqqqqAzz + 0: aaabcxyzpqrrrabbxyyyypqqqqqAzz + aaabcxyzpqrrrabbxyyyypqqqqqqAzz + 0: aaabcxyzpqrrrabbxyyyypqqqqqqAzz + aaaabcxyzpqrrrabbxyyyypqAzz + 0: aaaabcxyzpqrrrabbxyyyypqAzz + abxyzzpqrrrabbxyyyypqAzz + 0: abxyzzpqrrrabbxyyyypqAzz + aabxyzzzpqrrrabbxyyyypqAzz + 0: aabxyzzzpqrrrabbxyyyypqAzz + aaabxyzzzzpqrrrabbxyyyypqAzz + 0: aaabxyzzzzpqrrrabbxyyyypqAzz + aaaabxyzzzzpqrrrabbxyyyypqAzz + 0: aaaabxyzzzzpqrrrabbxyyyypqAzz + abcxyzzpqrrrabbxyyyypqAzz + 0: abcxyzzpqrrrabbxyyyypqAzz + aabcxyzzzpqrrrabbxyyyypqAzz + 0: aabcxyzzzpqrrrabbxyyyypqAzz + aaabcxyzzzzpqrrrabbxyyyypqAzz + 0: aaabcxyzzzzpqrrrabbxyyyypqAzz + aaaabcxyzzzzpqrrrabbxyyyypqAzz + 0: aaaabcxyzzzzpqrrrabbxyyyypqAzz + aaaabcxyzzzzpqrrrabbbxyyyypqAzz + 0: aaaabcxyzzzzpqrrrabbbxyyyypqAzz + aaaabcxyzzzzpqrrrabbbxyyyyypqAzz + 0: aaaabcxyzzzzpqrrrabbbxyyyyypqAzz + aaabcxyzpqrrrabbxyyyypABzz + 0: aaabcxyzpqrrrabbxyyyypABzz + aaabcxyzpqrrrabbxyyyypABBzz + 0: aaabcxyzpqrrrabbxyyyypABBzz + >>>aaabxyzpqrrrabbxyyyypqAzz + 0: aaabxyzpqrrrabbxyyyypqAzz + >aaaabxyzpqrrrabbxyyyypqAzz + 0: aaaabxyzpqrrrabbxyyyypqAzz + >>>>abcxyzpqrrrabbxyyyypqAzz + 0: abcxyzpqrrrabbxyyyypqAzz +\= Expect no match + abxyzpqrrabbxyyyypqAzz +No match + abxyzpqrrrrabbxyyyypqAzz +No match + abxyzpqrrrabxyyyypqAzz +No match + aaaabcxyzzzzpqrrrabbbxyyyyyypqAzz +No match + aaaabcxyzzzzpqrrrabbbxyyypqAzz +No match + aaabcxyzpqrrrabbxyyyypqqqqqqqAzz +No match + +/^(abc){1,2}zz/ + abczz + 0: abczz + 1: abc + abcabczz + 0: abcabczz + 1: abc +\= Expect no match + zz +No match + abcabcabczz +No match + >>abczz +No match + +/^(b+?|a){1,2}?c/ + bc + 0: bc + 1: b + bbc + 0: bbc + 1: b + bbbc + 0: bbbc + 1: bb + bac + 0: bac + 1: a + bbac + 0: bbac + 1: a + aac + 0: aac + 1: a + abbbbbbbbbbbc + 0: abbbbbbbbbbbc + 1: bbbbbbbbbbb + bbbbbbbbbbbac + 0: bbbbbbbbbbbac + 1: a +\= Expect no match + aaac +No match + abbbbbbbbbbbac +No match + +/^(b+|a){1,2}c/ + bc + 0: bc + 1: b + bbc + 0: bbc + 1: bb + bbbc + 0: bbbc + 1: bbb + bac + 0: bac + 1: a + bbac + 0: bbac + 1: a + aac + 0: aac + 1: a + abbbbbbbbbbbc + 0: abbbbbbbbbbbc + 1: bbbbbbbbbbb + bbbbbbbbbbbac + 0: bbbbbbbbbbbac + 1: a +\= Expect no match + aaac +No match + abbbbbbbbbbbac +No match + +/^(b+|a){1,2}?bc/ + bbc + 0: bbc + 1: b + +/^(b*|ba){1,2}?bc/ + babc + 0: babc + 1: ba + bbabc + 0: bbabc + 1: ba + bababc + 0: bababc + 1: ba +\= Expect no match + bababbc +No match + babababc +No match + +/^(ba|b*){1,2}?bc/ + babc + 0: babc + 1: ba + bbabc + 0: bbabc + 1: ba + bababc + 0: bababc + 1: ba +\= Expect no match + bababbc +No match + babababc +No match + +#/^\ca\cA\c[;\c:/ +# \x01\x01\e;z +# 0: \x01\x01\x1b;z + 
+/^[ab\]cde]/ + athing + 0: a + bthing + 0: b + ]thing + 0: ] + cthing + 0: c + dthing + 0: d + ething + 0: e +\= Expect no match + fthing +No match + [thing +No match + \\thing +No match + +/^[]cde]/ + ]thing + 0: ] + cthing + 0: c + dthing + 0: d + ething + 0: e +\= Expect no match + athing +No match + fthing +No match + +/^[^ab\]cde]/ + fthing + 0: f + [thing + 0: [ + \\thing + 0: \ +\= Expect no match + athing +No match + bthing +No match + ]thing +No match + cthing +No match + dthing +No match + ething +No match + +/^[^]cde]/ + athing + 0: a + fthing + 0: f +\= Expect no match + ]thing +No match + cthing +No match + dthing +No match + ething +No match + +# DLC - I don't get this one +#/^\/ +#  +# 0: \x81 + +#updated to handle 16-bits utf8 +/^ÿ/ + ÿ + 0: \xc3\xbf + +/^[0-9]+$/ + 0 + 0: 0 + 1 + 0: 1 + 2 + 0: 2 + 3 + 0: 3 + 4 + 0: 4 + 5 + 0: 5 + 6 + 0: 6 + 7 + 0: 7 + 8 + 0: 8 + 9 + 0: 9 + 10 + 0: 10 + 100 + 0: 100 +\= Expect no match + abc +No match + +/^.*nter/ + enter + 0: enter + inter + 0: inter + uponter + 0: uponter + +/^xxx[0-9]+$/ + xxx0 + 0: xxx0 + xxx1234 + 0: xxx1234 +\= Expect no match + xxx +No match + +/^.+[0-9][0-9][0-9]$/ + x123 + 0: x123 + x1234 + 0: x1234 + xx123 + 0: xx123 + 123456 + 0: 123456 +\= Expect no match + 123 +No match + +/^.+?[0-9][0-9][0-9]$/ + x123 + 0: x123 + x1234 + 0: x1234 + xx123 + 0: xx123 + 123456 + 0: 123456 +\= Expect no match + 123 +No match + +/^([^!]+)!(.+)=apquxz\.ixr\.zzz\.ac\.uk$/ + abc!pqr=apquxz.ixr.zzz.ac.uk + 0: abc!pqr=apquxz.ixr.zzz.ac.uk + 1: abc + 2: pqr +\= Expect no match + !pqr=apquxz.ixr.zzz.ac.uk +No match + abc!=apquxz.ixr.zzz.ac.uk +No match + abc!pqr=apquxz:ixr.zzz.ac.uk +No match + abc!pqr=apquxz.ixr.zzz.ac.ukk +No match + +/:/ + Well, we need a colon: somewhere + 0: : +\= Expect no match + Fail without a colon +No match + +/([\da-f:]+)$/i + 0abc + 0: 0abc + 1: 0abc + abc + 0: abc + 1: abc + fed + 0: fed + 1: fed + E + 0: E + 1: E + :: + 0: :: + 1: :: + 5f03:12C0::932e + 0: 5f03:12C0::932e + 1: 5f03:12C0::932e + fed def + 0: def + 1: def + Any old stuff + 0: ff + 1: ff +\= Expect no match + 0zzz +No match + gzzz +No match + fed\x20 +No match + Any old rubbish +No match + +/^.*\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$/ + .1.2.3 + 0: .1.2.3 + 1: 1 + 2: 2 + 3: 3 + A.12.123.0 + 0: A.12.123.0 + 1: 12 + 2: 123 + 3: 0 +\= Expect no match + .1.2.3333 +No match + 1.2.3 +No match + 1234.2.3 +No match + +/^(\d+)\s+IN\s+SOA\s+(\S+)\s+(\S+)\s*\(\s*$/ + 1 IN SOA non-sp1 non-sp2( + 0: 1 IN SOA non-sp1 non-sp2( + 1: 1 + 2: non-sp1 + 3: non-sp2 + 1 IN SOA non-sp1 non-sp2 ( + 0: 1 IN SOA non-sp1 non-sp2 ( + 1: 1 + 2: non-sp1 + 3: non-sp2 +\= Expect no match + 1IN SOA non-sp1 non-sp2( +No match + +/^[a-zA-Z\d][a-zA-Z\d\-]*(\.[a-zA-Z\d][a-zA-z\d\-]*)*\.$/ + a. + 0: a. + Z. + 0: Z. + 2. + 0: 2. + ab-c.pq-r. + 0: ab-c.pq-r. + 1: .pq-r + sxk.zzz.ac.uk. + 0: sxk.zzz.ac.uk. + 1: .uk + x-.y-. + 0: x-.y-. + 1: .y- +\= Expect no match + -abc.peq. 
+No match + +/^\*\.[a-z]([a-z\-\d]*[a-z\d]+)?(\.[a-z]([a-z\-\d]*[a-z\d]+)?)*$/ + *.a + 0: *.a + *.b0-a + 0: *.b0-a + 1: 0-a + *.c3-b.c + 0: *.c3-b.c + 1: 3-b + 2: .c + *.c-a.b-c + 0: *.c-a.b-c + 1: -a + 2: .b-c + 3: -c +\= Expect no match + *.0 +No match + *.a- +No match + *.a-b.c- +No match + *.c-a.0-c +No match + +/^(?=ab(de))(abd)(e)/ + abde + 0: abde + 1: de + 2: abd + 3: e + +/^(?!(ab)de|x)(abd)(f)/ + abdf + 0: abdf + 1: + 2: abd + 3: f + +/^(?=(ab(cd)))(ab)/ + abcd + 0: ab + 1: abcd + 2: cd + 3: ab + +/^[\da-f](\.[\da-f])*$/i + a.b.c.d + 0: a.b.c.d + 1: .d + A.B.C.D + 0: A.B.C.D + 1: .D + a.b.c.1.2.3.C + 0: a.b.c.1.2.3.C + 1: .C + +/^\".*\"\s*(;.*)?$/ + \"1234\" + 0: "1234" + \"abcd\" ; + 0: "abcd" ; + 1: ; + \"\" ; rhubarb + 0: "" ; rhubarb + 1: ; rhubarb +\= Expect no match + \"1234\" : things +No match + +/^$/ + \ + 0: +\= Expect no match + A non-empty line +No match + +/ ^ a (?# begins with a) b\sc (?# then b c) $ (?# then end)/x + ab c + 0: ab c +\= Expect no match + abc +No match + ab cde +No match + +/(?x) ^ a (?# begins with a) b\sc (?# then b c) $ (?# then end)/ + ab c + 0: ab c +\= Expect no match + abc +No match + ab cde +No match + +/^ a\ b[c ]d $/x + a bcd + 0: a bcd + a b d + 0: a b d +\= Expect no match + abcd +No match + ab d +No match + +/^(a(b(c)))(d(e(f)))(h(i(j)))(k(l(m)))$/ + abcdefhijklm + 0: abcdefhijklm + 1: abc + 2: bc + 3: c + 4: def + 5: ef + 6: f + 7: hij + 8: ij + 9: j +10: klm +11: lm +12: m + +/^(?:a(b(c)))(?:d(e(f)))(?:h(i(j)))(?:k(l(m)))$/ + abcdefhijklm + 0: abcdefhijklm + 1: bc + 2: c + 3: ef + 4: f + 5: ij + 6: j + 7: lm + 8: m + +#/^[\w][\W][\s][\S][\d][\D][\b][\n][\c]][\022]/ +# a+ Z0+\x08\n\x1d\x12 +# 0: a+ Z0+\x08\x0a\x1d\x12 + +/^[.^$|()*+?{,}]+/ + .^\$(*+)|{?,?} + 0: .^$(*+)|{?,?} + +/^a*\w/ + z + 0: z + az + 0: az + aaaz + 0: aaaz + a + 0: a + aa + 0: aa + aaaa + 0: aaaa + a+ + 0: a + aa+ + 0: aa + +/^a*?\w/ + z + 0: z + az + 0: a + aaaz + 0: a + a + 0: a + aa + 0: a + aaaa + 0: a + a+ + 0: a + aa+ + 0: a + +/^a+\w/ + az + 0: az + aaaz + 0: aaaz + aa + 0: aa + aaaa + 0: aaaa + aa+ + 0: aa + +/^a+?\w/ + az + 0: az + aaaz + 0: aa + aa + 0: aa + aaaa + 0: aa + aa+ + 0: aa + +/^\d{8}\w{2,}/ + 1234567890 + 0: 1234567890 + 12345678ab + 0: 12345678ab + 12345678__ + 0: 12345678__ +\= Expect no match + 1234567 +No match + +/^[aeiou\d]{4,5}$/ + uoie + 0: uoie + 1234 + 0: 1234 + 12345 + 0: 12345 + aaaaa + 0: aaaaa +\= Expect no match + 123456 +No match + +/^[aeiou\d]{4,5}?/ + uoie + 0: uoie + 1234 + 0: 1234 + 12345 + 0: 1234 + aaaaa + 0: aaaa + 123456 + 0: 1234 + +/\A(abc|def)=(\1){2,3}\Z/ + abc=abcabc + 0: abc=abcabc + 1: abc + 2: abc + def=defdefdef + 0: def=defdefdef + 1: def + 2: def +\= Expect no match + abc=defdef +No match + +/^(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)\11*(\3\4)\1(?#)2$/ + abcdefghijkcda2 + 0: abcdefghijkcda2 + 1: a + 2: b + 3: c + 4: d + 5: e + 6: f + 7: g + 8: h + 9: i +10: j +11: k +12: cd + abcdefghijkkkkcda2 + 0: abcdefghijkkkkcda2 + 1: a + 2: b + 3: c + 4: d + 5: e + 6: f + 7: g + 8: h + 9: i +10: j +11: k +12: cd + +/(cat(a(ract|tonic)|erpillar)) \1()2(3)/ + cataract cataract23 + 0: cataract cataract23 + 1: cataract + 2: aract + 3: ract + 4: + 5: 3 + catatonic catatonic23 + 0: catatonic catatonic23 + 1: catatonic + 2: atonic + 3: tonic + 4: + 5: 3 + caterpillar caterpillar23 + 0: caterpillar caterpillar23 + 1: caterpillar + 2: erpillar + 3: + 4: + 5: 3 + + +/^From +([^ ]+) +[a-zA-Z][a-zA-Z][a-zA-Z] +[a-zA-Z][a-zA-Z][a-zA-Z] +[0-9]?[0-9] +[0-9][0-9]:[0-9][0-9]/ + From abcd Mon Sep 01 12:33:02 1997 + 0: From abcd Mon Sep 01 12:33 + 
1: abcd + +/^From\s+\S+\s+([a-zA-Z]{3}\s+){2}\d{1,2}\s+\d\d:\d\d/ + From abcd Mon Sep 01 12:33:02 1997 + 0: From abcd Mon Sep 01 12:33 + 1: Sep + From abcd Mon Sep 1 12:33:02 1997 + 0: From abcd Mon Sep 1 12:33 + 1: Sep +\= Expect no match + From abcd Sep 01 12:33:02 1997 +No match + +/^12.34/s + 12\n34 + 0: 12\x0a34 + 12\r34 + 0: 12\x0d34 + +/\w+(?=\t)/ + the quick brown\t fox + 0: brown + +/foo(?!bar)(.*)/ + foobar is foolish see? + 0: foolish see? + 1: lish see? + +/(?:(?!foo)...|^.{0,2})bar(.*)/ + foobar crowbar etc + 0: rowbar etc + 1: etc + barrel + 0: barrel + 1: rel + 2barrel + 0: 2barrel + 1: rel + A barrel + 0: A barrel + 1: rel + +/^(\D*)(?=\d)(?!123)/ + abc456 + 0: abc + 1: abc +\= Expect no match + abc123 +No match + +/^1234(?# test newlines + inside)/ + 1234 + 0: 1234 + +/^1234 #comment in extended re + /x + 1234 + 0: 1234 + +/#rhubarb + abcd/x + abcd + 0: abcd + +/^abcd#rhubarb/x + abcd + 0: abcd + +/^(a)\1{2,3}(.)/ + aaab + 0: aaab + 1: a + 2: b + aaaab + 0: aaaab + 1: a + 2: b + aaaaab + 0: aaaaa + 1: a + 2: a + aaaaaab + 0: aaaaa + 1: a + 2: a + +/(?!^)abc/ + the abc + 0: abc +\= Expect no match + abc +No match + +/(?=^)abc/ + abc + 0: abc +\= Expect no match + the abc +No match + +/^[ab]{1,3}(ab*|b)/ + aabbbbb + 0: aabb + 1: b + +/^[ab]{1,3}?(ab*|b)/ + aabbbbb + 0: aabbbbb + 1: abbbbb + +/^[ab]{1,3}?(ab*?|b)/ + aabbbbb + 0: aa + 1: a + +/^[ab]{1,3}(ab*?|b)/ + aabbbbb + 0: aabb + 1: b + +/ (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* # optional leading comment +(?: (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +" (?: # opening quote... +[^\\\x80-\xff\n\015"] # Anything except backslash and quote +| # or +\\ [^\x80-\xff] # Escaped something (something != CR) +)* " # closing quote +) # initial word +(?: (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* \. (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +" (?: # opening quote... +[^\\\x80-\xff\n\015"] # Anything except backslash and quote +| # or +\\ [^\x80-\xff] # Escaped something (something != CR) +)* " # closing quote +) )* # further okay, if led by a period +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* @ (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # initial subdomain +(?: # +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* \. # if led by a period... 
+(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # ...further okay +)* +# address +| # or +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +" (?: # opening quote... +[^\\\x80-\xff\n\015"] # Anything except backslash and quote +| # or +\\ [^\x80-\xff] # Escaped something (something != CR) +)* " # closing quote +) # one word, optionally followed by.... +(?: +[^()<>@,;:".\\\[\]\x80-\xff\000-\010\012-\037] | # atom and space parts, or... +\( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) | # comments, or... + +" (?: # opening quote... +[^\\\x80-\xff\n\015"] # Anything except backslash and quote +| # or +\\ [^\x80-\xff] # Escaped something (something != CR) +)* " # closing quote +# quoted strings +)* +< (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* # leading < +(?: @ (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # initial subdomain +(?: # +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* \. # if led by a period... +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # ...further okay +)* + +(?: (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* , (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* @ (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # initial subdomain +(?: # +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* \. # if led by a period... +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... 
+(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # ...further okay +)* +)* # further okay, if led by comma +: # closing colon +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* )? # optional route +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +" (?: # opening quote... +[^\\\x80-\xff\n\015"] # Anything except backslash and quote +| # or +\\ [^\x80-\xff] # Escaped something (something != CR) +)* " # closing quote +) # initial word +(?: (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* \. (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +" (?: # opening quote... +[^\\\x80-\xff\n\015"] # Anything except backslash and quote +| # or +\\ [^\x80-\xff] # Escaped something (something != CR) +)* " # closing quote +) )* # further okay, if led by a period +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* @ (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # initial subdomain +(?: # +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* \. # if led by a period... +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # ...further okay +)* +# address spec +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* > # trailing > +# name and address +) (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* # optional trailing comment +/x + Alan Other + 0: Alan Other + + 0: user@dom.ain + user\@dom.ain + 0: user@dom.ain + \"A. Other\" (a comment) + 0: "A. Other" (a comment) + A. Other (a comment) + 0: Other (a comment) + \"/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/\"\@x400-re.lay + 0: "/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/"@x400-re.lay + A missing angle @,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... 
+(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +# Atom +| # or +" # " +[^\\\x80-\xff\n\015"] * # normal +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015"] * )* # ( special normal* )* +" # " +# Quoted string +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +\. +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +# Atom +| # or +" # " +[^\\\x80-\xff\n\015"] * # normal +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015"] * )* # ( special normal* )* +" # " +# Quoted string +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# additional words +)* +@ +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +(?: +\. +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. 
+(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +)* +# address +| # or +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +# Atom +| # or +" # " +[^\\\x80-\xff\n\015"] * # normal +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015"] * )* # ( special normal* )* +" # " +# Quoted string +) +# leading word +[^()<>@,;:".\\\[\]\x80-\xff\000-\010\012-\037] * # "normal" atoms and or spaces +(?: +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +| +" # " +[^\\\x80-\xff\n\015"] * # normal +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015"] * )* # ( special normal* )* +" # " +) # "special" comment or quoted string +[^()<>@,;:".\\\[\]\x80-\xff\000-\010\012-\037] * # more "normal" +)* +< +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# < +(?: +@ +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +(?: +\. +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. 
+(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +)* +(?: , +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +@ +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +(?: +\. +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +)* +)* # additional domains +: +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +)? # optional route +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +# Atom +| # or +" # " +[^\\\x80-\xff\n\015"] * # normal +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015"] * )* # ( special normal* )* +" # " +# Quoted string +) +[\040\t]* # Nab whitespace. 
+(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +\. +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +# Atom +| # or +" # " +[^\\\x80-\xff\n\015"] * # normal +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015"] * )* # ( special normal* )* +" # " +# Quoted string +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# additional words +)* +@ +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +(?: +\. +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. 
+# optional trailing comments +)* +# address spec +> # > +# name and address +) +/x + Alan Other + 0: Alan Other + + 0: user@dom.ain + user\@dom.ain + 0: user@dom.ain + \"A. Other\" (a comment) + 0: "A. Other" + A. Other (a comment) + 0: Other + \"/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/\"\@x400-re.lay + 0: "/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/"@x400-re.lay + A missing angle ?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f + +/P[^*]TAIRE[^*]{1,6}?LL/ + xxxxxxxxxxxPSTAIREISLLxxxxxxxxx + 0: PSTAIREISLL + +/P[^*]TAIRE[^*]{1,}?LL/ + xxxxxxxxxxxPSTAIREISLLxxxxxxxxx + 0: PSTAIREISLL + +/(\.\d\d[1-9]?)\d+/ + 1.230003938 + 0: .230003938 + 1: .23 + 1.875000282 + 0: .875000282 + 1: .875 + 1.235 + 0: .235 + 1: .23 + +/(\.\d\d((?=0)|\d(?=\d)))/ + 1.230003938 + 0: .23 + 1: .23 + 2: + 1.875000282 + 0: .875 + 1: .875 + 2: 5 +\= Expect no match + 1.235 +No match + +/\b(foo)\s+(\w+)/i + Food is on the foo table + 0: foo table + 1: foo + 2: table + +/foo(.*)bar/ + The food is under the bar in the barn. + 0: food is under the bar in the bar + 1: d is under the bar in the + +/foo(.*?)bar/ + The food is under the bar in the barn. + 0: food is under the bar + 1: d is under the + +/(.*)(\d*)/ + I have 2 numbers: 53147 + 0: I have 2 numbers: 53147 + 1: I have 2 numbers: 53147 + 2: + +/(.*)(\d+)/ + I have 2 numbers: 53147 + 0: I have 2 numbers: 53147 + 1: I have 2 numbers: 5314 + 2: 7 + +/(.*?)(\d*)/ + I have 2 numbers: 53147 + 0: + 1: + 2: + +/(.*?)(\d+)/ + I have 2 numbers: 53147 + 0: I have 2 + 1: I have + 2: 2 + +/(.*)(\d+)$/ + I have 2 numbers: 53147 + 0: I have 2 numbers: 53147 + 1: I have 2 numbers: 5314 + 2: 7 + +/(.*?)(\d+)$/ + I have 2 numbers: 53147 + 0: I have 2 numbers: 53147 + 1: I have 2 numbers: + 2: 53147 + +/(.*)\b(\d+)$/ + I have 2 numbers: 53147 + 0: I have 2 numbers: 53147 + 1: I have 2 numbers: + 2: 53147 + +/(.*\D)(\d+)$/ + I have 2 numbers: 53147 + 0: I have 2 numbers: 53147 + 1: I have 2 numbers: + 2: 53147 + +/^\D*(?!123)/ + ABC123 + 0: AB + +/^(\D*)(?=\d)(?!123)/ + ABC445 + 0: ABC + 1: ABC +\= Expect no match + ABC123 +No match + +/^[W-]46]/ + W46]789 + 0: W46] + -46]789 + 0: -46] +\= Expect no match + Wall +No match + Zebra +No match + 42 +No match + [abcd] +No match + ]abcd[ +No match + +/^[W-\]46]/ + W46]789 + 0: W + Wall + 0: W + Zebra + 0: Z + Xylophone + 0: X + 42 + 0: 4 + [abcd] + 0: [ + ]abcd[ + 0: ] + \\backslash + 0: \ +\= Expect no match + -46]789 +No match + well +No match + +/\d\d\/\d\d\/\d\d\d\d/ + 01/01/2000 + 0: 01/01/2000 + +/word (?:[a-zA-Z0-9]+ ){0,10}otherword/ + word cat dog elephant mussel cow horse canary baboon snake shark otherword + 0: word cat dog elephant mussel cow horse canary baboon snake shark otherword +\= Expect no match + word cat dog elephant mussel cow horse canary baboon snake shark +No match + +/word (?:[a-zA-Z0-9]+ ){0,300}otherword/ +\= Expect no match + word cat dog elephant mussel cow horse canary baboon snake shark the quick brown fox and the lazy dog and several other words getting close to thirty by now I hope +No match + +/^(a){0,0}/ + bcd + 0: + abc + 0: + aab + 0: + +/^(a){0,1}/ + bcd + 0: + abc + 0: a + 1: a + aab + 0: a + 1: a + +/^(a){0,2}/ + bcd + 0: + abc + 0: a + 1: a + aab + 0: aa + 1: a + +/^(a){0,3}/ + bcd + 0: + abc + 0: a + 1: a + aab + 0: aa + 1: a + aaa + 0: aaa + 1: a + +/^(a){0,}/ + bcd + 0: + abc + 0: a + 1: a + aab + 0: aa + 1: a + aaa + 0: aaa + 1: a + aaaaaaaa + 0: aaaaaaaa + 1: a + +/^(a){1,1}/ + abc + 0: a + 1: a + aab + 0: a + 1: a +\= Expect no match + bcd +No match + 
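# The tests above exercise a capture convention shared by PCRE and .NET:
# when a group is repeated, the group's capture reports only its final
# iteration. A minimal Go sketch of the same behavior (our own example
# using the stdlib regexp package, not part of this fixture set):
#
#   re := regexp.MustCompile(`^(a){0,3}`)
#   m := re.FindStringSubmatch("aaab")
#   // m[0] == "aaa" (whole match), m[1] == "a" (final repetition only)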
+/^(a){1,2}/ + abc + 0: a + 1: a + aab + 0: aa + 1: a +\= Expect no match + bcd +No match + +/^(a){1,3}/ + abc + 0: a + 1: a + aab + 0: aa + 1: a + aaa + 0: aaa + 1: a +\= Expect no match + bcd +No match + +/^(a){1,}/ + abc + 0: a + 1: a + aab + 0: aa + 1: a + aaa + 0: aaa + 1: a + aaaaaaaa + 0: aaaaaaaa + 1: a +\= Expect no match + bcd +No match + +/.*\.gif/ + borfle\nbib.gif\nno + 0: bib.gif + +/.{0,}\.gif/ + borfle\nbib.gif\nno + 0: bib.gif + +/.*\.gif/m + borfle\nbib.gif\nno + 0: bib.gif + +/.*\.gif/s + borfle\nbib.gif\nno + 0: borfle\x0abib.gif + +/.*\.gif/ms + borfle\nbib.gif\nno + 0: borfle\x0abib.gif + +/.*$/ + borfle\nbib.gif\nno + 0: no + +/.*$/m + borfle\nbib.gif\nno + 0: borfle + +/.*$/s + borfle\nbib.gif\nno + 0: borfle\x0abib.gif\x0ano + +/.*$/ms + borfle\nbib.gif\nno + 0: borfle\x0abib.gif\x0ano + +/.*$/ + borfle\nbib.gif\nno\n + 0: no + +/.*$/m + borfle\nbib.gif\nno\n + 0: borfle + +/.*$/s + borfle\nbib.gif\nno\n + 0: borfle\x0abib.gif\x0ano\x0a + +/.*$/ms + borfle\nbib.gif\nno\n + 0: borfle\x0abib.gif\x0ano\x0a + +/(.*X|^B)/ + abcde\n1234Xyz + 0: 1234X + 1: 1234X + BarFoo + 0: B + 1: B +\= Expect no match + abcde\nBar +No match + +/(.*X|^B)/m + abcde\n1234Xyz + 0: 1234X + 1: 1234X + BarFoo + 0: B + 1: B + abcde\nBar + 0: B + 1: B + +/(.*X|^B)/s + abcde\n1234Xyz + 0: abcde\x0a1234X + 1: abcde\x0a1234X + BarFoo + 0: B + 1: B +\= Expect no match + abcde\nBar +No match + +/(.*X|^B)/ms + abcde\n1234Xyz + 0: abcde\x0a1234X + 1: abcde\x0a1234X + BarFoo + 0: B + 1: B + abcde\nBar + 0: B + 1: B + +/(?s)(.*X|^B)/ + abcde\n1234Xyz + 0: abcde\x0a1234X + 1: abcde\x0a1234X + BarFoo + 0: B + 1: B +\= Expect no match + abcde\nBar +No match + +/(?s:.*X|^B)/ + abcde\n1234Xyz + 0: abcde\x0a1234X + BarFoo + 0: B +\= Expect no match + abcde\nBar +No match + +/^.*B/ +\= Expect no match + abc\nB +No match + +/(?s)^.*B/ + abc\nB + 0: abc\x0aB + +/(?m)^.*B/ + abc\nB + 0: B + +/(?ms)^.*B/ + abc\nB + 0: abc\x0aB + +/(?ms)^B/ + abc\nB + 0: B + +/(?s)B$/ + B\n + 0: B + +/^[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]/ + 123456654321 + 0: 123456654321 + +/^\d\d\d\d\d\d\d\d\d\d\d\d/ + 123456654321 + 0: 123456654321 + +/^[\d][\d][\d][\d][\d][\d][\d][\d][\d][\d][\d][\d]/ + 123456654321 + 0: 123456654321 + +/^[abc]{12}/ + abcabcabcabc + 0: abcabcabcabc + +/^[a-c]{12}/ + abcabcabcabc + 0: abcabcabcabc + +/^(a|b|c){12}/ + abcabcabcabc + 0: abcabcabcabc + 1: c + +/^[abcdefghijklmnopqrstuvwxy0123456789]/ + n + 0: n +\= Expect no match + z +No match + +/abcde{0,0}/ + abcd + 0: abcd +\= Expect no match + abce +No match + +/ab[cd]{0,0}e/ + abe + 0: abe +\= Expect no match + abcde +No match + +/ab(c){0,0}d/ + abd + 0: abd +\= Expect no match + abcd +No match + +/a(b*)/ + a + 0: a + 1: + ab + 0: ab + 1: b + abbbb + 0: abbbb + 1: bbbb +\= Expect no match + bbbbb +No match + +/ab\d{0}e/ + abe + 0: abe +\= Expect no match + ab1e +No match + +/"([^\\"]+|\\.)*"/ + the \"quick\" brown fox + 0: "quick" + 1: quick + \"the \\\"quick\\\" brown fox\" + 0: "the \"quick\" brown fox" + 1: brown fox + +/]{0,})>]{0,})>([\d]{0,}\.)(.*)((
<BR>([\w\W\s\d][^<>]{0,})|[\s]{0,}))<\/a><\/TD><TD([\w\W\s\d][^<>]{0,})>([\w\W\s\d][^<>]{0,})<\/TD><TD([\w\W\s\d][^<>]{0,})>([\w\W\s\d][^<>]{0,})<\/TD><\/TR>/is + 43.Word Processor
(N-1286)
Lega lstaff.comCA - Statewide + 0: 43.Word Processor
(N-1286)
Lega lstaff.comCA - Statewide + 1: BGCOLOR='#DBE9E9' + 2: align=left valign=top + 3: 43. + 4: Word Processor
(N-1286) + 5: + 6: + 7: + 8: align=left valign=top + 9: Lega lstaff.com +10: align=left valign=top +11: CA - Statewide + +/a[^a]b/ + acb + 0: acb + a\nb + 0: a\x0ab + +/a.b/ + acb + 0: acb +\= Expect no match + a\nb +No match + +/a[^a]b/s + acb + 0: acb + a\nb + 0: a\x0ab + +/a.b/s + acb + 0: acb + a\nb + 0: a\x0ab + +/^(b+?|a){1,2}?c/ + bac + 0: bac + 1: a + bbac + 0: bbac + 1: a + bbbac + 0: bbbac + 1: a + bbbbac + 0: bbbbac + 1: a + bbbbbac + 0: bbbbbac + 1: a + +/^(b+|a){1,2}?c/ + bac + 0: bac + 1: a + bbac + 0: bbac + 1: a + bbbac + 0: bbbac + 1: a + bbbbac + 0: bbbbac + 1: a + bbbbbac + 0: bbbbbac + 1: a + +/(?!\A)x/m + a\bx\n + 0: x + a\nx\n + 0: x +\= Expect no match + x\nb\n +No match + +/(A|B)*?CD/ + CD + 0: CD + +/(A|B)*CD/ + CD + 0: CD + +/(AB)*?\1/ + ABABAB + 0: ABAB + 1: AB + +/(AB)*\1/ + ABABAB + 0: ABABAB + 1: AB + +/(?.*/)foo" + /this/is/a/very/long/line/in/deed/with/very/many/slashes/in/and/foo + 0: /this/is/a/very/long/line/in/deed/with/very/many/slashes/in/and/foo +\= Expect no match + /this/is/a/very/long/line/in/deed/with/very/many/slashes/in/it/you/see/ +No match + +/(?>(\.\d\d[1-9]?))\d+/ + 1.230003938 + 0: .230003938 + 1: .23 + 1.875000282 + 0: .875000282 + 1: .875 +\= Expect no match + 1.235 +No match + +/^((?>\w+)|(?>\s+))*$/ + now is the time for all good men to come to the aid of the party + 0: now is the time for all good men to come to the aid of the party + 1: party +\= Expect no match + this is not a line with only words and spaces! +No match + +/(\d+)(\w)/ + 12345a + 0: 12345a + 1: 12345 + 2: a + 12345+ + 0: 12345 + 1: 1234 + 2: 5 + +/((?>\d+))(\w)/ + 12345a + 0: 12345a + 1: 12345 + 2: a +\= Expect no match + 12345+ +No match + +/(?>a+)b/ + aaab + 0: aaab + +/((?>a+)b)/ + aaab + 0: aaab + 1: aaab + +/(?>(a+))b/ + aaab + 0: aaab + 1: aaa + +/(?>b)+/ + aaabbbccc + 0: bbb + +/(?>a+|b+|c+)*c/ + aaabbbbccccd + 0: aaabbbbc + +/((?>[^()]+)|\([^()]*\))+/ + ((abc(ade)ufh()()x + 0: abc(ade)ufh()()x + 1: x + +/\(((?>[^()]+)|\([^()]+\))+\)/ + (abc) + 0: (abc) + 1: abc + (abc(def)xyz) + 0: (abc(def)xyz) + 1: xyz +\= Expect no match + ((()aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +No match + +/a(?-i)b/i + ab + 0: ab + Ab + 0: Ab +\= Expect no match + aB +No match + AB +No match + +/(a (?x)b c)d e/ + a bcd e + 0: a bcd e + 1: a bc +\= Expect no match + a b cd e +No match + abcd e +No match + a bcde +No match + +/(a b(?x)c d (?-x)e f)/ + a bcde f + 0: a bcde f + 1: a bcde f +\= Expect no match + abcdef +No match + +/(a(?i)b)c/ + abc + 0: abc + 1: ab + aBc + 0: aBc + 1: aB +\= Expect no match + abC +No match + aBC +No match + Abc +No match + ABc +No match + ABC +No match + AbC +No match + +/a(?i:b)c/ + abc + 0: abc + aBc + 0: aBc +\= Expect no match + ABC +No match + abC +No match + aBC +No match + +/a(?i:b)*c/ + aBc + 0: aBc + aBBc + 0: aBBc +\= Expect no match + aBC +No match + aBBC +No match + +/a(?=b(?i)c)\w\wd/ + abcd + 0: abcd + abCd + 0: abCd +\= Expect no match + aBCd +No match + abcD +No match + +/(?s-i:more.*than).*million/i + more than million + 0: more than million + more than MILLION + 0: more than MILLION + more \n than Million + 0: more \x0a than Million +\= Expect no match + MORE THAN MILLION +No match + more \n than \n million +No match + +/(?:(?s-i)more.*than).*million/i + more than million + 0: more than million + more than MILLION + 0: more than MILLION + more \n than Million + 0: more \x0a than Million +\= Expect no match + MORE THAN MILLION +No match + more \n than \n million +No match + +/(?>a(?i)b+)+c/ + abc + 0: abc + aBbc + 0: aBbc + aBBc + 0: 
aBBc +\= Expect no match + Abc +No match + abAb +No match + abbC +No match + +/(?=a(?i)b)\w\wc/ + abc + 0: abc + aBc + 0: aBc +\= Expect no match + Ab +No match + abC +No match + aBC +No match + +/(?<=a(?i)b)(\w\w)c/ + abxxc + 0: xxc + 1: xx + aBxxc + 0: xxc + 1: xx +\= Expect no match + Abxxc +No match + ABxxc +No match + abxxC +No match + +/(?:(a)|b)(?(1)A|B)/ + aA + 0: aA + 1: a + bB + 0: bB +\= Expect no match + aB +No match + bA +No match + +/^(a)?(?(1)a|b)+$/ + aa + 0: aa + 1: a + b + 0: b + bb + 0: bb +\= Expect no match + ab +No match + +# Perl gets this next one wrong if the pattern ends with $; in that case it +# fails to match "12". + +/^(?(?=abc)\w{3}:|\d\d)/ + abc: + 0: abc: + 12 + 0: 12 + 123 + 0: 12 +\= Expect no match + xyz +No match + +/^(?(?!abc)\d\d|\w{3}:)$/ + abc: + 0: abc: + 12 + 0: 12 +\= Expect no match + 123 +No match + xyz +No match + +/(?(?<=foo)bar|cat)/ + foobar + 0: bar + cat + 0: cat + fcat + 0: cat + focat + 0: cat +\= Expect no match + foocat +No match + +/(?(?a*)*/ + a + 0: a + aa + 0: aa + aaaa + 0: aaaa + +/(abc|)+/ + abc + 0: abc + 1: + abcabc + 0: abcabc + 1: + abcabcabc + 0: abcabcabc + 1: + xyz + 0: + 1: + +/([a]*)*/ + a + 0: a + 1: + aaaaa + 0: aaaaa + 1: + +/([ab]*)*/ + a + 0: a + 1: + b + 0: b + 1: + ababab + 0: ababab + 1: + aaaabcde + 0: aaaab + 1: + bbbb + 0: bbbb + 1: + +/([^a]*)*/ + b + 0: b + 1: + bbbb + 0: bbbb + 1: + aaa + 0: + 1: + +/([^ab]*)*/ + cccc + 0: cccc + 1: + abab + 0: + 1: + +/([a]*?)*/ + a + 0: + 1: + aaaa + 0: + 1: + +/([ab]*?)*/ + a + 0: + 1: + b + 0: + 1: + abab + 0: + 1: + baba + 0: + 1: + +/([^a]*?)*/ + b + 0: + 1: + bbbb + 0: + 1: + aaa + 0: + 1: + +/([^ab]*?)*/ + c + 0: + 1: + cccc + 0: + 1: + baba + 0: + 1: + +/(?>a*)*/ + a + 0: a + aaabcde + 0: aaa + +/((?>a*))*/ + aaaaa + 0: aaaaa + 1: + aabbaa + 0: aa + 1: + +/((?>a*?))*/ + aaaaa + 0: + 1: + aabbaa + 0: + 1: + +/(?(?=[^a-z]+[a-z]) \d{2}-[a-z]{3}-\d{2} | \d{2}-\d{2}-\d{2} ) /x + 12-sep-98 + 0: 12-sep-98 + 12-09-98 + 0: 12-09-98 +\= Expect no match + sep-12-98 +No match + +/(?<=(foo))bar\1/ + foobarfoo + 0: barfoo + 1: foo + foobarfootling + 0: barfoo + 1: foo +\= Expect no match + foobar +No match + barfoo +No match + +/(?i:saturday|sunday)/ + saturday + 0: saturday + sunday + 0: sunday + Saturday + 0: Saturday + Sunday + 0: Sunday + SATURDAY + 0: SATURDAY + SUNDAY + 0: SUNDAY + SunDay + 0: SunDay + +/(a(?i)bc|BB)x/ + abcx + 0: abcx + 1: abc + aBCx + 0: aBCx + 1: aBC + bbx + 0: bbx + 1: bb + BBx + 0: BBx + 1: BB +\= Expect no match + abcX +No match + aBCX +No match + bbX +No match + BBX +No match + +/^([ab](?i)[cd]|[ef])/ + ac + 0: ac + 1: ac + aC + 0: aC + 1: aC + bD + 0: bD + 1: bD + elephant + 0: e + 1: e + Europe + 0: E + 1: E + frog + 0: f + 1: f + France + 0: F + 1: F +\= Expect no match + Africa +No match + +/^(ab|a(?i)[b-c](?m-i)d|x(?i)y|z)/ + ab + 0: ab + 1: ab + aBd + 0: aBd + 1: aBd + xy + 0: xy + 1: xy + xY + 0: xY + 1: xY + zebra + 0: z + 1: z + Zambesi + 0: Z + 1: Z +\= Expect no match + aCD +No match + XY +No match + +/(?<=foo\n)^bar/m + foo\nbar + 0: bar +\= Expect no match + bar +No match + baz\nbar +No match + +/(?<=(?]&/ + <&OUT + 0: <& + +/^(a\1?){4}$/ + aaaaaaaaaa + 0: aaaaaaaaaa + 1: aaaa +\= Expect no match + AB +No match + aaaaaaaaa +No match + aaaaaaaaaaa +No match + +/^(a(?(1)\1)){4}$/ + aaaaaaaaaa + 0: aaaaaaaaaa + 1: aaaa +\= Expect no match + aaaaaaaaa +No match + aaaaaaaaaaa +No match + +/(?:(f)(o)(o)|(b)(a)(r))*/ + foobar + 0: foobar + 1: f + 2: o + 3: o + 4: b + 5: a + 6: r + +/(?<=a)b/ + ab + 0: b +\= Expect no match + cb +No match + b 
+No match + +/(? + 2: abcd + xy:z:::abcd + 0: xy:z:::abcd + 1: xy:z::: + 2: abcd + +/^[^bcd]*(c+)/ + aexycd + 0: aexyc + 1: c + +/(a*)b+/ + caab + 0: aab + 1: aa + +/([\w:]+::)?(\w+)$/ + abcd + 0: abcd + 1: + 2: abcd + xy:z:::abcd + 0: xy:z:::abcd + 1: xy:z::: + 2: abcd +\= Expect no match + abcd: +No match + abcd: +No match + +/^[^bcd]*(c+)/ + aexycd + 0: aexyc + 1: c + +/(>a+)ab/ + +/(?>a+)b/ + aaab + 0: aaab + +/([[:]+)/ + a:[b]: + 0: :[ + 1: :[ + +/([[=]+)/ + a=[b]= + 0: =[ + 1: =[ + +/([[.]+)/ + a.[b]. + 0: .[ + 1: .[ + +/((?>a+)b)/ + aaab + 0: aaab + 1: aaab + +/(?>(a+))b/ + aaab + 0: aaab + 1: aaa + +/((?>[^()]+)|\([^()]*\))+/ + ((abc(ade)ufh()()x + 0: abc(ade)ufh()()x + 1: x + +/a\Z/ +\= Expect no match + aaab +No match + a\nb\n +No match + +/b\Z/ + a\nb\n + 0: b + +/b\z/ + +/b\Z/ + a\nb + 0: b + +/b\z/ + a\nb + 0: b + +/^(?>(?(1)\.|())[^\W_](?>[a-z0-9-]*[^\W_])?)+$/ + a + 0: a + 1: + abc + 0: abc + 1: + a-b + 0: a-b + 1: + 0-9 + 0: 0-9 + 1: + a.b + 0: a.b + 1: + 5.6.7 + 0: 5.6.7 + 1: + the.quick.brown.fox + 0: the.quick.brown.fox + 1: + a100.b200.300c + 0: a100.b200.300c + 1: + 12-ab.1245 + 0: 12-ab.1245 + 1: +\= Expect no match + \ +No match + .a +No match + -a +No match + a- +No match + a. +No match + a_b +No match + a.- +No match + a.. +No match + ab..bc +No match + the.quick.brown.fox- +No match + the.quick.brown.fox. +No match + the.quick.brown.fox_ +No match + the.quick.brown.fox+ +No match + +/(?>.*)(?<=(abcd|wxyz))/ + alphabetabcd + 0: alphabetabcd + 1: abcd + endingwxyz + 0: endingwxyz + 1: wxyz +\= Expect no match + a rather long string that doesn't end with one of them +No match + +/word (?>(?:(?!otherword)[a-zA-Z0-9]+ ){0,30})otherword/ + word cat dog elephant mussel cow horse canary baboon snake shark otherword + 0: word cat dog elephant mussel cow horse canary baboon snake shark otherword +\= Expect no match + word cat dog elephant mussel cow horse canary baboon snake shark +No match + +/word (?>[a-zA-Z0-9]+ ){0,30}otherword/ +\= Expect no match + word cat dog elephant mussel cow horse canary baboon snake shark the quick brown fox and the lazy dog and several other words getting close to thirty by now I hope +No match + +/(?<=\d{3}(?!999))foo/ + 999foo + 0: foo + 123999foo + 0: foo +\= Expect no match + 123abcfoo +No match + +/(?<=(?!...999)\d{3})foo/ + 999foo + 0: foo + 123999foo + 0: foo +\= Expect no match + 123abcfoo +No match + +/(?<=\d{3}(?!999)...)foo/ + 123abcfoo + 0: foo + 123456foo + 0: foo +\= Expect no match + 123999foo +No match + +/(?<=\d{3}...)(? + 2: + 3: abcd +
+ 2: + 3: abcd + \s*)=(?>\s*) # find + 2: + 3: abcd + Z)+|A)*/ + ZABCDEFG + 0: ZA + 1: A + +/((?>)+|A)*/ + ZABCDEFG + 0: + 1: + +/^[\d-a]/ + abcde + 0: a + -things + 0: - + 0digit + 0: 0 +\= Expect no match + bcdef +No match + +/[\s]+/ + > \x09\x0a\x0c\x0d\x0b< + 0: \x09\x0a\x0c\x0d\x0b + +/\s+/ + > \x09\x0a\x0c\x0d\x0b< + 0: \x09\x0a\x0c\x0d\x0b + +/a b/x + ab + 0: ab + +/(?!\A)x/m + a\nxb\n + 0: x + +/(?!^)x/m +\= Expect no match + a\nxb\n +No match + +#/abc\Qabc\Eabc/ +# abcabcabc +# 0: abcabcabc + +#/abc\Q(*+|\Eabc/ +# abc(*+|abc +# 0: abc(*+|abc + +#/ abc\Q abc\Eabc/x +# abc abcabc +# 0: abc abcabc +#\= Expect no match +# abcabcabc +#No match + +#/abc#comment +# \Q#not comment +# literal\E/x +# abc#not comment\n literal +# 0: abc#not comment\x0a literal + +#/abc#comment +# \Q#not comment +# literal/x +# abc#not comment\n literal +# 0: abc#not comment\x0a literal + +#/abc#comment +# \Q#not comment +# literal\E #more comment +# /x +# abc#not comment\n literal +# 0: abc#not comment\x0a literal + +#/abc#comment +# \Q#not comment +# literal\E #more comment/x +# abc#not comment\n literal +# 0: abc#not comment\x0a literal + +#/\Qabc\$xyz\E/ +# abc\\\$xyz +# 0: abc\$xyz + +#/\Qabc\E\$\Qxyz\E/ +# abc\$xyz +# 0: abc$xyz + +/\Gabc/ + abc + 0: abc +\= Expect no match + xyzabc +No match + +/a(?x: b c )d/ + XabcdY + 0: abcd +\= Expect no match + Xa b c d Y +No match + +/((?x)x y z | a b c)/ + XabcY + 0: abc + 1: abc + AxyzB + 0: xyz + 1: xyz + +/(?i)AB(?-i)C/ + XabCY + 0: abC +\= Expect no match + XabcY +No match + +/((?i)AB(?-i)C|D)E/ + abCE + 0: abCE + 1: abC + DE + 0: DE + 1: D +\= Expect no match + abcE +No match + abCe +No match + dE +No match + De +No match + +/(.*)\d+\1/ + abc123abc + 0: abc123abc + 1: abc + abc123bc + 0: bc123bc + 1: bc + +/(.*)\d+\1/s + abc123abc + 0: abc123abc + 1: abc + abc123bc + 0: bc123bc + 1: bc + +/((.*))\d+\1/ + abc123abc + 0: abc123abc + 1: abc + 2: abc + abc123bc + 0: bc123bc + 1: bc + 2: bc + +# This tests for an IPv6 address in the form where it can have up to +# eight components, one and only one of which is empty. This must be +# an internal component. + +/^(?!:) # colon disallowed at start + (?: # start of item + (?: [0-9a-f]{1,4} | # 1-4 hex digits or + (?(1)0 | () ) ) # if null previously matched, fail; else null + : # followed by colon + ){1,7} # end item; 1-7 of them required + [0-9a-f]{1,4} $ # final hex number at end of string + (?(1)|.) 
# check that there was an empty component + /ix + a123::a123 + 0: a123::a123 + 1: + a123:b342::abcd + 0: a123:b342::abcd + 1: + a123:b342::324e:abcd + 0: a123:b342::324e:abcd + 1: + a123:ddde:b342::324e:abcd + 0: a123:ddde:b342::324e:abcd + 1: + a123:ddde:b342::324e:dcba:abcd + 0: a123:ddde:b342::324e:dcba:abcd + 1: + a123:ddde:9999:b342::324e:dcba:abcd + 0: a123:ddde:9999:b342::324e:dcba:abcd + 1: +\= Expect no match + 1:2:3:4:5:6:7:8 +No match + a123:bce:ddde:9999:b342::324e:dcba:abcd +No match + a123::9999:b342::324e:dcba:abcd +No match + abcde:2:3:4:5:6:7:8 +No match + ::1 +No match + abcd:fee0:123:: +No match + :1 +No match + 1: +No match + +#/[z\Qa-d]\E]/ +# z +# 0: z +# a +# 0: a +# - +# 0: - +# d +# 0: d +# ] +# 0: ] +#\= Expect no match +# b +#No match + +#TODO: PCRE has an optimization to make this workable, .NET does not +#/(a+)*b/ +#\= Expect no match +# aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +#No match + +# All these had to be updated because we understand unicode +# and this looks like it's expecting single byte matches + +# .NET generates \xe4...not sure what's up, might just be different code pages +/(?i)reg(?:ul(?:[aä]|ae)r|ex)/ + REGular + 0: REGular + regulaer + 0: regulaer + Regex + 0: Regex + regulär + 0: regul\xc3\xa4r + +#/Åæåä[à-ÿÀ-ß]+/ +# Åæåäà +# 0: \xc5\xe6\xe5\xe4\xe0 +# Åæåäÿ +# 0: \xc5\xe6\xe5\xe4\xff +# ÅæåäÀ +# 0: \xc5\xe6\xe5\xe4\xc0 +# Åæåäß +# 0: \xc5\xe6\xe5\xe4\xdf + +/(?<=Z)X./ + \x84XAZXB + 0: XB + +/ab cd (?x) de fg/ + ab cd defg + 0: ab cd defg + +/ab cd(?x) de fg/ + ab cddefg + 0: ab cddefg +\= Expect no match + abcddefg +No match + +/(? + 2: + D + 0: D + 1: + 2: + +# this is really long with debug -- removing for now +#/(a|)*\d/ +# aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa4 +# 0: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa4 +# 1: +#\= Expect no match +# aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +#No match + +/(?>a|)*\d/ + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa4 + 0: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa4 +\= Expect no match + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +No match + +/(?:a|)*\d/ + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa4 + 0: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa4 +\= Expect no match + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +No match + +/^(?s)(?>.*)(? + 2: a + +/(?>(a))b|(a)c/ + ac + 0: ac + 1: + 2: a + +/(?=(a))ab|(a)c/ + ac + 0: ac + 1: + 2: a + +/((?>(a))b|(a)c)/ + ac + 0: ac + 1: ac + 2: + 3: a + +/(?=(?>(a))b|(a)c)(..)/ + ac + 0: ac + 1: + 2: a + 3: ac + +/(?>(?>(a))b|(a)c)/ + ac + 0: ac + 1: + 2: a + +/((?>(a+)b)+(aabab))/ + aaaabaaabaabab + 0: aaaabaaabaabab + 1: aaaabaaabaabab + 2: aaa + 3: aabab + +/(?>a+|ab)+?c/ +\= Expect no match + aabc +No match + +/(?>a+|ab)+c/ +\= Expect no match + aabc +No match + +/(?:a+|ab)+c/ + aabc + 0: aabc + +/^(?:a|ab)+c/ + aaaabc + 0: aaaabc + +/(?=abc){0}xyz/ + xyz + 0: xyz + +/(?=abc){1}xyz/ +\= Expect no match + xyz +No match + +/(?=(a))?./ + ab + 0: a + 1: a + bc + 0: b + +/(?=(a))??./ + ab + 0: a + bc + 0: b + +/^(?!a){0}\w+/ + aaaaa + 0: aaaaa + +/(?<=(abc))?xyz/ + abcxyz + 0: xyz + 1: abc + pqrxyz + 0: xyz + +/^[g]+/ + ggg<<>> + 0: ggg<<>> +\= Expect no match + \\ga +No match + +/^[ga]+/ + gggagagaxyz + 0: gggagaga + +/[:a]xxx[b:]/ + :xxx: + 0: :xxx: + +/(?<=a{2})b/i + xaabc + 0: b +\= Expect no match + xabc +No match + +/(? 
+# 4: +# 5: c +# 6: d +# 7: Y + +#/^X(?7)(a)(?|(b|(?|(r)|(t))(s))|(q))(c)(d)(Y)/ +# XYabcdY +# 0: XYabcdY +# 1: a +# 2: b +# 3: +# 4: +# 5: c +# 6: d +# 7: Y + +/(?'abc'\w+):\k{2}/ + a:aaxyz + 0: a:aa + 1: a + ab:ababxyz + 0: ab:abab + 1: ab +\= Expect no match + a:axyz +No match + ab:abxyz +No match + +/^(?a)? (?(ab)b|c) (?(ab)d|e)/x + abd + 0: abd + 1: a + ce + 0: ce + +# .NET has more consistent grouping numbers with these dupe groups for the two options +/(?:a(? (?')|(?")) |b(? (?')|(?")) ) (?(quote)[a-z]+|[0-9]+)/x,dupnames + a\"aaaaa + 0: a"aaaaa + 1: " + 2: + 3: " + b\"aaaaa + 0: b"aaaaa + 1: " + 2: + 3: " +\= Expect no match + b\"11111 +No match + +#/(?P(?P0)(?P>L1)|(?P>L2))/ +# 0 +# 0: 0 +# 1: 0 +# 00 +# 0: 00 +# 1: 00 +# 2: 0 +# 0000 +# 0: 0000 +# 1: 0000 +# 2: 0 + +#/(?P(?P0)|(?P>L2)(?P>L1))/ +# 0 +# 0: 0 +# 1: 0 +# 2: 0 +# 00 +# 0: 0 +# 1: 0 +# 2: 0 +# 0000 +# 0: 0 +# 1: 0 +# 2: 0 + +# Check the use of names for failure + +# Check opening parens in comment when seeking forward reference. + +#/(?P(?P=abn)xxx|)+/ +# xxx +# 0: +# 1: + +#Posses +/^(a)?(\w)/ + aaaaX + 0: aa + 1: a + 2: a + YZ + 0: Y + 1: + 2: Y + +#Posses +/^(?:a)?(\w)/ + aaaaX + 0: aa + 1: a + YZ + 0: Y + 1: Y + +/\A.*?(a|bc)/ + ba + 0: ba + 1: a + +/\A.*?(?:a|bc|d)/ + ba + 0: ba + +# -------------------------- + +/(another)?(\1?)test/ + hello world test + 0: test + 1: + 2: + +/(another)?(\1+)test/ +\= Expect no match + hello world test +No match + +/((?:a?)*)*c/ + aac + 0: aac + 1: + +/((?>a?)*)*c/ + aac + 0: aac + 1: + +/(?>.*?a)(?<=ba)/ + aba + 0: ba + +/(?:.*?a)(?<=ba)/ + aba + 0: aba + +/(?>.*?a)b/s + aab + 0: ab + +/(?>.*?a)b/ + aab + 0: ab + +/(?>^a)b/s +\= Expect no match + aab +No match + +/(?>.*?)(?<=(abcd)|(wxyz))/ + alphabetabcd + 0: + 1: abcd + endingwxyz + 0: + 1: + 2: wxyz + +/(?>.*)(?<=(abcd)|(wxyz))/ + alphabetabcd + 0: alphabetabcd + 1: abcd + endingwxyz + 0: endingwxyz + 1: + 2: wxyz + +"(?>.*)foo" +\= Expect no match + abcdfooxyz +No match + +"(?>.*?)foo" + abcdfooxyz + 0: foo + +# Tests that try to figure out how Perl works. My hypothesis is that the first +# verb that is backtracked onto is the one that acts. This seems to be the case +# almost all the time, but there is one exception that is perhaps a bug. + +/a(?=bc).|abd/ + abd + 0: abd + abc + 0: ab + +/a(?>bc)d|abd/ + abceabd + 0: abd + +# These tests were formerly in test 2, but changes in PCRE and Perl have +# made them compatible. + +/^(a)?(?(1)a|b)+$/ +\= Expect no match + a +No match + +# ---- + +/^\d*\w{4}/ + 1234 + 0: 1234 +\= Expect no match + 123 +No match + +/^[^b]*\w{4}/ + aaaa + 0: aaaa +\= Expect no match + aaa +No match + +/^[^b]*\w{4}/i + aaaa + 0: aaaa +\= Expect no match + aaa +No match + +/^a*\w{4}/ + aaaa + 0: aaaa +\= Expect no match + aaa +No match + +/^a*\w{4}/i + aaaa + 0: aaaa +\= Expect no match + aaa +No match + +/(?:(?foo)|(?bar))\k/dupnames + foofoo + 0: foofoo + 1: foo + barbar + 0: barbar + 1: bar + +# A notable difference between PCRE and .NET. According to +# the PCRE docs: +# If you make a subroutine call to a non-unique named +# subpattern, the one that corresponds to the first +# occurrence of the name is used. In the absence of +# duplicate numbers (see the previous section) this is +# the one with the lowest number. +# .NET takes the most recently captured number according to MSDN: +# A backreference refers to the most recent definition of +# a group (the definition most immediately to the left, +# when matching left to right). 
When a group makes multiple +# captures, a backreference refers to the most recent capture. + +#/(?A)(?:(?foo)|(?bar))\k/dupnames +# AfooA +# 0: AfooA +# 1: A +# 2: foo +# AbarA +# 0: AbarA +# 1: A +# 2: +# 3: bar +#\= Expect no match +# Afoofoo +#No match +# Abarbar +#No match + +/^(\d+)\s+IN\s+SOA\s+(\S+)\s+(\S+)\s*\(\s*$/ + 1 IN SOA non-sp1 non-sp2( + 0: 1 IN SOA non-sp1 non-sp2( + 1: 1 + 2: non-sp1 + 3: non-sp2 + +# TODO: .NET's group number ordering here in the second example is a bit odd +/^ (?:(?A)|(?'B'B)(?A)) (?(A)x) (?(B)y)$/x,dupnames + Ax + 0: Ax + 1: A + BAxy + 0: BAxy + 1: A + 2: B + +/ ^ a + b $ /x + aaaab + 0: aaaab + +/ ^ a + #comment + b $ /x + aaaab + 0: aaaab + +/ ^ a + #comment + #comment + b $ /x + aaaab + 0: aaaab + +/ ^ (?> a + ) b $ /x + aaaab + 0: aaaab + +/ ^ ( a + ) + \w $ /x + aaaab + 0: aaaab + 1: aaaa + +/(?:x|(?:(xx|yy)+|x|x|x|x|x)|a|a|a)bc/ +\= Expect no match + acb +No match + +#Posses +#/\A(?:[^\"]+|\"(?:[^\"]*|\"\")*\")+/ +# NON QUOTED \"QUOT\"\"ED\" AFTER \"NOT MATCHED +# 0: NON QUOTED "QUOT""ED" AFTER + +#Posses +#/\A(?:[^\"]+|\"(?:[^\"]+|\"\")*\")+/ +# NON QUOTED \"QUOT\"\"ED\" AFTER \"NOT MATCHED +# 0: NON QUOTED "QUOT""ED" AFTER + +#Posses +#/\A(?:[^\"]+|\"(?:[^\"]+|\"\")+\")+/ +# NON QUOTED \"QUOT\"\"ED\" AFTER \"NOT MATCHED +# 0: NON QUOTED "QUOT""ED" AFTER + +#Posses +#/\A([^\"1]+|[\"2]([^\"3]*|[\"4][\"5])*[\"6])+/ +# NON QUOTED \"QUOT\"\"ED\" AFTER \"NOT MATCHED +# 0: NON QUOTED "QUOT""ED" AFTER +# 1: AFTER +# 2: + +/^\w+(?>\s*)(?<=\w)/ + test test + 0: tes + +#/(?Pa)?(?Pb)?(?()c|d)*l/ +# acl +# 0: acl +# 1: a +# bdl +# 0: bdl +# 1: +# 2: b +# adl +# 0: dl +# bcl +# 0: l + +/\sabc/ + \x0babc + 0: \x0babc + +#/[\Qa]\E]+/ +# aa]] +# 0: aa]] + +#/[\Q]a\E]+/ +# aa]] +# 0: aa]] + +/A((((((((a))))))))\8B/ + AaaB + 0: AaaB + 1: a + 2: a + 3: a + 4: a + 5: a + 6: a + 7: a + 8: a + +/A(((((((((a)))))))))\9B/ + AaaB + 0: AaaB + 1: a + 2: a + 3: a + 4: a + 5: a + 6: a + 7: a + 8: a + 9: a + +/(|ab)*?d/ + abd + 0: abd + 1: ab + xyd + 0: d + +/(\2|a)(\1)/ + aaa + 0: aa + 1: a + 2: a + +/(\2)(\1)/ + +"Z*(|d*){216}" + +/((((((((((((x))))))))))))\12/ + xx + 0: xx + 1: x + 2: x + 3: x + 4: x + 5: x + 6: x + 7: x + 8: x + 9: x +10: x +11: x +12: x + +#"(?|(\k'Pm')|(?'Pm'))" +# abcd +# 0: +# 1: + +#/(?|(aaa)|(b))\g{1}/ +# aaaaaa +# 0: aaaaaa +# 1: aaa +# bb +# 0: bb +# 1: b + +#/(?|(aaa)|(b))(?1)/ +# aaaaaa +# 0: aaaaaa +# 1: aaa +# baaa +# 0: baaa +# 1: b +#\= Expect no match +# bb +#No match + +#/(?|(aaa)|(b))/ +# xaaa +# 0: aaa +# 1: aaa +# xbc +# 0: b +# 1: b + +#/(?|(?'a'aaa)|(?'a'b))\k'a'/ +# aaaaaa +# 0: aaaaaa +# 1: aaa +# bb +# 0: bb +# 1: b + +#/(?|(?'a'aaa)|(?'a'b))(?'a'cccc)\k'a'/dupnames +# aaaccccaaa +# 0: aaaccccaaa +# 1: aaa +# 2: cccc +# bccccb +# 0: bccccb +# 1: b +# 2: cccc + +# End of testinput1 diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 91d60a809f..ef508417b3 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.11.0" + "v2": "2.12.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index e17b196f6c..ae71149470 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ 
b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,13 @@ # Changelog +## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26) + + +### Features + +* **v2/callctx:** add new callctx package ([#291](https://github.com/googleapis/gax-go/issues/291)) ([11503ed](https://github.com/googleapis/gax-go/commit/11503ed98df4ae1bbdedf91ff64d47e63f187d68)) +* **v2:** add BuildHeaders and InsertMetadataIntoOutgoingContext to header ([#290](https://github.com/googleapis/gax-go/issues/290)) ([6a4b89f](https://github.com/googleapis/gax-go/commit/6a4b89f5551a40262e7c3caf2e1bdc7321b76ea1)) + ## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) diff --git a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go new file mode 100644 index 0000000000..af15fb5827 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go @@ -0,0 +1,74 @@ +// Copyright 2023, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package callctx provides helpers for storing and retrieving values out of +// [context.Context]. These values are used by our client libraries in various +// ways across the stack. +package callctx + +import ( + "context" + "fmt" +) + +const ( + headerKey = contextKey("header") +) + +// contextKey is a private type used to store/retrieve context values. +type contextKey string + +// HeadersFromContext retrieves headers set from [SetHeaders]. These headers +// can then be cast to http.Header or metadata.MD to send along on requests. 
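+//
+// A minimal illustrative read (our example, not from the package docs;
+// req is a hypothetical *http.Request):
+//
+//	if h := HeadersFromContext(ctx); h != nil {
+//		req.Header = http.Header(h) // http.Header is map[string][]string
+//	}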
+func HeadersFromContext(ctx context.Context) map[string][]string { + m, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + return nil + } + return m +} + +// SetHeaders stores key value pairs in the returned context that can later +// be retrieved by [HeadersFromContext]. Values stored in this manner will +// automatically be retrieved by client libraries and sent as outgoing headers +// on all requests. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. +func SetHeaders(ctx context.Context, keyvals ...string) context.Context { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("callctx: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + h, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + h = make(map[string][]string) + } + for i := 0; i < len(keyvals); i = i + 2 { + h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1]) + } + return context.WithValue(ctx, headerKey, h) +} diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go index 6488461f4d..453fab7ecc 100644 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -31,9 +31,15 @@ package gax import ( "bytes" + "context" + "fmt" + "net/http" "runtime" "strings" "unicode" + + "github.com/googleapis/gax-go/v2/callctx" + "google.golang.org/grpc/metadata" ) var ( @@ -117,3 +123,46 @@ func XGoogHeader(keyval ...string) string { } return buf.String()[1:] } + +// InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries +// only. +// +// InsertMetadataIntoOutgoingContext returns a new context that merges the +// provided keyvals metadata pairs with any existing metadata/headers in the +// provided context. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) context.Context { + return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...)) +} + +// BuildHeaders is for use by the Google Cloud Libraries only. +// +// BuildHeaders returns a new http.Header that merges the provided +// keyvals header pairs with any existing metadata/headers in the provided +// context. keyvals should have a corresponding value for every key provided. +// If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func BuildHeaders(ctx context.Context, keyvals ...string) http.Header { + return http.Header(insertMetadata(ctx, keyvals...)) +} + +func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("gax: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + out, ok := metadata.FromOutgoingContext(ctx) + if !ok { + out = metadata.MD(make(map[string][]string)) + } + headers := callctx.HeadersFromContext(ctx) + for k, v := range headers { + out[k] = append(out[k], v...) 
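+// Illustrative sketch of how the helpers below compose with callctx (our
+// example; the header key and values are made up). Pairs stored on the
+// context and pairs passed directly are merged, with values appended
+// rather than overwritten:
+//
+//	ctx = callctx.SetHeaders(ctx, "x-example-key", "a")
+//	hdrs := BuildHeaders(ctx, "x-example-key", "b")
+//	// hdrs["x-example-key"] == []string{"a", "b"}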
+ } + for i := 0; i < len(keyvals); i = i + 2 { + out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + } + return out +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 374dcdb115..7425b5ffbb 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.11.0" +const Version = "2.12.0" diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go similarity index 83% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go index 258c0636aa..7c08e564f1 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go @@ -19,9 +19,10 @@ import ( "errors" "io" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" ) +// TODO: Give error package name prefix in next minor release. var errInvalidVarint = errors.New("invalid varint32 encountered") // ReadDelimited decodes a message from the provided length-delimited stream, @@ -36,6 +37,12 @@ var errInvalidVarint = errors.New("invalid varint32 encountered") // of the stream has been reached in doing so. In that case, any subsequent // calls return (0, io.EOF). 
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // TODO: Consider allowing the caller to specify a decode buffer in the + // next major version. + + // TODO: Consider using error wrapping to annotate error state in pass- + // through cases in the next minor version. + // Per AbstractParser#parsePartialDelimitedFrom with // CodedInputStream#readRawVarint32. var headerBuf [binary.MaxVarintLen32]byte @@ -53,15 +60,14 @@ func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { if err != nil { return bytesRead, err } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... + // A Reader should not return (0, nil); but if it does, it should + // be treated as no-op according to the Reader contract. continue } bytesRead += newBytesRead // Now present everything read so far to the varint decoder and // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + messageLength, varIntBytes = binary.Uvarint(headerBuf[:bytesRead]) } messageBuf := make([]byte, messageLength) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go similarity index 91% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go index 8fb59ad226..e58dd9d297 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go @@ -18,7 +18,7 @@ import ( "encoding/binary" "io" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" ) // WriteDelimited encodes and dumps a message to the provided writer prefixed @@ -28,6 +28,9 @@ import ( // number of bytes written and any applicable error. This is roughly // equivalent to the companion Java API's MessageLite#writeDelimitedTo. func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + // TODO: Consider allowing the caller to specify an encode buffer in the + // next major version. + buffer, err := proto.Marshal(m) if err != nil { return 0, err diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go index 3aa8d0590b..b22d862fbc 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go @@ -22,7 +22,7 @@ import "github.com/prometheus/client_golang/prometheus" // Prometheus metrics. 
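//
// A minimal round-trip sketch (our example; msg and got are placeholder
// proto.Message values, and error handling is elided):
//
//	var buf bytes.Buffer
//	_, _ = WriteDelimited(&buf, msg)
//	_, _ = ReadDelimited(&buf, got) // got now carries msg's contents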
Note that the data models of expvar and Prometheus are // fundamentally different, and that the expvar Collector is inherently slower // than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototying, but you should seriously consider a more +// for experiments and prototyping, but you should seriously consider a more // direct implementation of Prometheus metrics for monitoring production // systems. // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index 2f5616894e..bcfa4fa10e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -132,16 +132,19 @@ type GoCollectionOption uint32 const ( // GoRuntimeMemStatsCollection represents the metrics represented by runtime.MemStats structure. - // Deprecated. Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector. + // + // Deprecated: Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector. GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota // GoRuntimeMetricsCollection is the new set of metrics represented by runtime/metrics package. - // Deprecated. Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")}) + // + // Deprecated: Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")}) // function to enable those metrics in the collector. GoRuntimeMetricsCollection ) // WithGoCollections allows enabling different collections for Go collector on top of base metrics. -// Deprecated. Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics. +// +// Deprecated: Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics. func WithGoCollections(flags GoCollectionOption) func(options *internal.GoCollectorOptions) { return func(options *internal.GoCollectorOptions) { if flags&GoRuntimeMemStatsCollection == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 62de4dc59a..4ce84e7a80 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -20,6 +20,7 @@ import ( "time" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/types/known/timestamppb" ) // Counter is a Metric that represents a single numerical value that only ever @@ -66,7 +67,7 @@ type CounterVecOpts struct { CounterOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. 
VariableLabels ConstrainableLabels } @@ -90,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter { nil, opts.ConstLabels, ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} + if opts.now == nil { + opts.now = time.Now + } + result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now} result.init(result) // Init self-collection. + result.createdTs = timestamppb.New(opts.now()) return result } @@ -106,10 +111,12 @@ type counter struct { selfCollector desc *Desc + createdTs *timestamppb.Timestamp labelPairs []*dto.LabelPair exemplar atomic.Value // Containing nil or a *dto.Exemplar. - now func() time.Time // To mock out time.Now() for testing. + // now is for testing purposes, by default it's time.Now. + now func() time.Time } func (c *counter) Desc() *Desc { @@ -159,8 +166,7 @@ func (c *counter) Write(out *dto.Metric) error { exemplar = e.(*dto.Exemplar) } val := c.get() - - return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) + return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs) } func (c *counter) updateExemplar(v float64, l Labels) { @@ -200,13 +206,17 @@ func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec { opts.VariableLabels, opts.ConstLabels, ) + if opts.now == nil { + opts.now = time.Now + } return &CounterVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) + if len(lvs) != len(desc.variableLabels.names) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) } - result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} + result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now} result.init(result) // Init self-collection. + result.createdTs = timestamppb.New(opts.now()) return result }), } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index deedc2dfbe..68ffe3c248 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -52,7 +52,7 @@ type Desc struct { constLabelPairs []*dto.LabelPair // variableLabels contains names of labels and normalization function for // which the metric maintains variable values. - variableLabels ConstrainedLabels + variableLabels *compiledLabels // id is a hash of the values of the ConstLabels and fqName. This // must be unique among all registered descriptors and can therefore be // used as an identifier of the descriptor. @@ -93,7 +93,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const d := &Desc{ fqName: fqName, help: help, - variableLabels: variableLabels.constrainedLabels(), + variableLabels: variableLabels.compile(), } if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) @@ -103,7 +103,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const // their sorted label names) plus the fqName (at position 0). 
labelValues := make([]string, 1, len(constLabels)+1) labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels)) + labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names)) labelNameSet := map[string]struct{}{} // First add only the const label names and sort them... for labelName := range constLabels { @@ -128,13 +128,13 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. - for _, label := range d.variableLabels { - if !checkLabelName(label.Name) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName) + for _, label := range d.variableLabels.names { + if !checkLabelName(label) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName) return d } - labelNames = append(labelNames, "$"+label.Name) - labelNameSet[label.Name] = struct{}{} + labelNames = append(labelNames, "$"+label) + labelNameSet[label] = struct{}{} } if len(labelNames) != len(labelNameSet) { d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName) @@ -189,11 +189,19 @@ func (d *Desc) String() string { fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), ) } + vlStrings := make([]string, 0, len(d.variableLabels.names)) + for _, vl := range d.variableLabels.names { + if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { + vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) + } else { + vlStrings = append(vlStrings, vl) + } + } return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}", d.fqName, d.help, strings.Join(lpStrings, ","), - d.variableLabels, + strings.Join(vlStrings, ","), ) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go index c41ab37f3b..de5a856293 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) { continue } var v interface{} - labels := make([]string, len(desc.variableLabels)) + labels := make([]string, len(desc.variableLabels.names)) if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { ch <- NewInvalidMetric(desc, err) continue diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index f1ea6c76f7..dd2eac9406 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -62,7 +62,7 @@ type GaugeVecOpts struct { GaugeOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. 
VariableLabels ConstrainableLabels } @@ -135,7 +135,7 @@ func (g *gauge) Sub(val float64) { func (g *gauge) Write(out *dto.Metric) error { val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, nil, out) + return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same @@ -166,8 +166,8 @@ func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec { ) return &GaugeVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) + if len(lvs) != len(desc.variableLabels.names) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) } result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 8d818afe90..b5c8bcb395 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -25,6 +25,7 @@ import ( dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // nativeHistogramBounds for the frac of observed values. Only relevant for @@ -391,7 +392,7 @@ type HistogramOpts struct { // zero, it is replaced by default buckets. The default buckets are // DefBuckets if no buckets for a native histogram (see below) are used, // otherwise the default is no buckets. (In other words, if you want to - // use both reguler buckets and buckets for a native histogram, you have + // use both regular buckets and buckets for a native histogram, you have // to define the regular buckets here explicitly.) Buckets []float64 @@ -413,8 +414,8 @@ type HistogramOpts struct { // and 2, same as between 2 and 4, and 4 and 8, etc.). // // Details about the actually used factor: The factor is calculated as - // 2^(2^n), where n is an integer number between (and including) -8 and - // 4. n is chosen so that the resulting factor is the largest that is + // 2^(2^-n), where n is an integer number between (and including) -4 and + // 8. n is chosen so that the resulting factor is the largest that is // still smaller or equal to NativeHistogramBucketFactor. Note that the // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) // ). If NativeHistogramBucketFactor is greater than 1 but smaller than @@ -428,12 +429,12 @@ type HistogramOpts struct { // a major version bump. NativeHistogramBucketFactor float64 // All observations with an absolute value of less or equal - // NativeHistogramZeroThreshold are accumulated into a “zero” - // bucket. For best results, this should be close to a bucket - // boundary. This is usually the case if picking a power of two. If + // NativeHistogramZeroThreshold are accumulated into a “zero” bucket. + // For best results, this should be close to a bucket boundary. This is + // usually the case if picking a power of two. If // NativeHistogramZeroThreshold is left at zero, - // DefNativeHistogramZeroThreshold is used as the threshold. To configure - // a zero bucket with an actual threshold of zero (i.e. 
only + // DefNativeHistogramZeroThreshold is used as the threshold. To + // configure a zero bucket with an actual threshold of zero (i.e. only // observations of precisely zero will go into the zero bucket), set // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero // constant (or any negative float value). @@ -446,26 +447,37 @@ type HistogramOpts struct { // Histogram are sufficiently wide-spread. In particular, this could be // used as a DoS attack vector. Where the observed values depend on // external inputs, it is highly recommended to set a - // NativeHistogramMaxBucketNumber.) Once the set + // NativeHistogramMaxBucketNumber.) Once the set // NativeHistogramMaxBucketNumber is exceeded, the following strategy is - // enacted: First, if the last reset (or the creation) of the histogram - // is at least NativeHistogramMinResetDuration ago, then the whole - // histogram is reset to its initial state (including regular - // buckets). If less time has passed, or if - // NativeHistogramMinResetDuration is zero, no reset is - // performed. Instead, the zero threshold is increased sufficiently to - // reduce the number of buckets to or below - // NativeHistogramMaxBucketNumber, but not to more than - // NativeHistogramMaxZeroThreshold. Thus, if - // NativeHistogramMaxZeroThreshold is already at or below the current - // zero threshold, nothing happens at this step. After that, if the - // number of buckets still exceeds NativeHistogramMaxBucketNumber, the - // resolution of the histogram is reduced by doubling the width of the - // sparse buckets (up to a growth factor between one bucket to the next - // of 2^(2^4) = 65536, see above). + // enacted: + // - First, if the last reset (or the creation) of the histogram is at + // least NativeHistogramMinResetDuration ago, then the whole + // histogram is reset to its initial state (including regular + // buckets). + // - If less time has passed, or if NativeHistogramMinResetDuration is + // zero, no reset is performed. Instead, the zero threshold is + // increased sufficiently to reduce the number of buckets to or below + // NativeHistogramMaxBucketNumber, but not to more than + // NativeHistogramMaxZeroThreshold. Thus, if + // NativeHistogramMaxZeroThreshold is already at or below the current + // zero threshold, nothing happens at this step. + // - After that, if the number of buckets still exceeds + // NativeHistogramMaxBucketNumber, the resolution of the histogram is + // reduced by doubling the width of the sparse buckets (up to a + // growth factor between one bucket to the next of 2^(2^4) = 65536, + // see above). + // - Any increased zero threshold or reduced resolution is reset back + // to their original values once NativeHistogramMinResetDuration has + // passed (since the last reset or the creation of the histogram). NativeHistogramMaxBucketNumber uint32 NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + + // now is for testing purposes, by default it's time.Now. + now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } // HistogramVecOpts bundles the options to create a HistogramVec metric. @@ -475,7 +487,7 @@ type HistogramVecOpts struct { HistogramOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. 
Each label value will be constrained with the optional Constraint // function, if provided. VariableLabels ConstrainableLabels } @@ -499,12 +511,12 @@ func NewHistogram(opts HistogramOpts) Histogram { } func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) + if len(desc.variableLabels.names) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) } - for _, n := range desc.variableLabels { - if n.Name == bucketLabel { + for _, n := range desc.variableLabels.names { + if n == bucketLabel { panic(errBucketLabelNotAllowed) } } @@ -514,6 +526,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } + if opts.now == nil { + opts.now = time.Now + } + if opts.afterFunc == nil { + opts.afterFunc = time.AfterFunc + } h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -521,8 +539,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, - lastResetTime: time.Now(), - now: time.Now, + lastResetTime: opts.now(), + now: opts.now, + afterFunc: opts.afterFunc, } if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { h.upperBounds = DefBuckets @@ -701,9 +720,18 @@ type histogram struct { nativeHistogramMaxZeroThreshold float64 nativeHistogramMaxBuckets uint32 nativeHistogramMinResetDuration time.Duration - lastResetTime time.Time // Protected by mtx. - - now func() time.Time // To mock out time.Now() for testing. + // lastResetTime is protected by mtx. It is also used as created timestamp. + lastResetTime time.Time + // resetScheduled is protected by mtx. It is true if a reset is + // scheduled for a later time (when nativeHistogramMinResetDuration has + // passed). + resetScheduled bool + + // now is for testing purposes, by default it's time.Now. + now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } func (h *histogram) Desc() *Desc { @@ -742,9 +770,10 @@ func (h *histogram) Write(out *dto.Metric) error { waitForCooldown(count, coldCounts) his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + CreatedTimestamp: timestamppb.New(h.lastResetTime), } out.Histogram = his out.Label = h.labelPairs @@ -782,6 +811,16 @@ func (h *histogram) Write(out *dto.Metric) error { his.ZeroCount = proto.Uint64(zeroBucket) his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) + + // Add a no-op span to a histogram without observations and with + // a zero threshold of zero. Otherwise, a native histogram would + // look like a classic histogram to scrapers. 
+ if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 { + his.PositiveSpan = []*dto.BucketSpan{{ + Offset: proto.Int32(0), + Length: proto.Uint32(0), + }} + } } addAndResetCounts(hotCounts, coldCounts) return nil @@ -848,26 +887,39 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { return } + // One of the other strategies will happen. To undo what they will do as + // soon as enough time has passed to satisfy + // h.nativeHistogramMinResetDuration, schedule a reset at the right time + // if we haven't done so already. + if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled { + h.resetScheduled = true + h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset) + } + if h.maybeWidenZeroBucket(hotCounts, coldCounts) { return } h.doubleBucketWidth(hotCounts, coldCounts) } -// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration -has been passed. It returns true if the histogram has been reset. The caller -must have locked h.mtx. -func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { +// maybeReset resets the whole histogram if at least +// h.nativeHistogramMinResetDuration has been passed. It returns true if the +// histogram has been reset. The caller must have locked h.mtx. +func (h *histogram) maybeReset( + hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int, +) bool { // We are using the possibly mocked h.now() rather than // time.Since(h.lastResetTime) to enable testing. - if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { + if h.nativeHistogramMinResetDuration == 0 || // No reset configured. + h.resetScheduled || // Do not interfere if a reset is already scheduled. + h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { return false } // Completely reset coldCounts. h.resetCounts(cold) // Repeat the latest observation to not lose it completely. cold.observe(value, bucket, true) - // Make coldCounts the new hot counts while ressetting countAndHotIdx. + // Make coldCounts the new hot counts while resetting countAndHotIdx. n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) count := n & ((1 << 63) - 1) waitForCooldown(count, hot) @@ -877,6 +929,29 @@ func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value return true } +// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be +// called without having locked h.mtx. +func (h *histogram) reset() { + h.mtx.Lock() + defer h.mtx.Unlock() + + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hot := h.counts[hotIdx] + cold := h.counts[coldIdx] + // Completely reset coldCounts. + h.resetCounts(cold) + // Make coldCounts the new hot counts while resetting countAndHotIdx. + n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too.
+ h.resetCounts(hot) + h.lastResetTime = h.now() + h.resetScheduled = false +} + // maybeWidenZeroBucket widens the zero bucket until it includes the existing // buckets closest to the zero bucket (which could be two, if an equidistant // negative and a positive bucket exists, but usually it's only one bucket to be @@ -1176,6 +1251,7 @@ type constHistogram struct { sum float64 buckets map[float64]uint64 labelPairs []*dto.LabelPair + createdTs *timestamppb.Timestamp } func (h *constHistogram) Desc() *Desc { @@ -1183,7 +1259,9 @@ func (h *constHistogram) Desc() *Desc { } func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} + his := &dto.Histogram{ + CreatedTimestamp: h.createdTs, + } buckets := make([]*dto.Bucket, 0, len(h.buckets)) @@ -1230,7 +1308,7 @@ func NewConstHistogram( if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } return &constHistogram{ @@ -1324,7 +1402,7 @@ func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { // Multiple spans with only small gaps in between are probably // encoded more efficiently as one larger span with a few empty // buckets. Needs some research to find the sweet spot. For now, - // we assume that gaps of one ore two buckets should not create + // we assume that gaps of one or two buckets should not create // a new span. iDelta := int32(i - nextI) if n == 0 || iDelta > 2 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index fd0750f2cf..a595a20362 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -14,7 +14,7 @@ // It provides tools to compare sequences of strings and generate textual diffs. // // Maintaining `GetUnifiedDiffString` here because original repository -// (https://github.com/pmezard/go-difflib) is no loger maintained. +// (https://github.com/pmezard/go-difflib) is no longer maintained. package internal import ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 63ff8683ce..c21911f292 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -32,19 +32,15 @@ import ( // create a Desc. type Labels map[string]string +// LabelConstraint normalizes label values. +type LabelConstraint func(string) string + // ConstrainedLabels represents a label name and its constrain function // to normalize label values. This type is commonly used when constructing // metric vector Collectors. 
type ConstrainedLabel struct { Name string - Constraint func(string) string -} - -func (cl ConstrainedLabel) Constrain(v string) string { - if cl.Constraint == nil { - return v - } - return cl.Constraint(v) + Constraint LabelConstraint } // ConstrainableLabels is an interface that allows creating of labels that can @@ -58,7 +54,7 @@ func (cl ConstrainedLabel) Constrain(v string) string { // }, // }) type ConstrainableLabels interface { - constrainedLabels() ConstrainedLabels + compile() *compiledLabels labelNames() []string } @@ -67,8 +63,20 @@ type ConstrainableLabels interface { // metric vector Collectors. type ConstrainedLabels []ConstrainedLabel -func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels { - return cls +func (cls ConstrainedLabels) compile() *compiledLabels { + compiled := &compiledLabels{ + names: make([]string, len(cls)), + labelConstraints: map[string]LabelConstraint{}, + } + + for i, label := range cls { + compiled.names[i] = label.Name + if label.Constraint != nil { + compiled.labelConstraints[label.Name] = label.Constraint + } + } + + return compiled } func (cls ConstrainedLabels) labelNames() []string { @@ -92,18 +100,36 @@ func (cls ConstrainedLabels) labelNames() []string { // } type UnconstrainedLabels []string -func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels { - constrainedLabels := make([]ConstrainedLabel, len(uls)) - for i, l := range uls { - constrainedLabels[i] = ConstrainedLabel{Name: l} +func (uls UnconstrainedLabels) compile() *compiledLabels { + return &compiledLabels{ + names: uls, } - return constrainedLabels } func (uls UnconstrainedLabels) labelNames() []string { return uls } +type compiledLabels struct { + names []string + labelConstraints map[string]LabelConstraint +} + +func (cls *compiledLabels) compile() *compiledLabels { + return cls +} + +func (cls *compiledLabels) labelNames() []string { + return cls.names +} + +func (cls *compiledLabels) constrain(labelName, value string) string { + if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil { + return fn(value) + } + return value +} + // reservedLabelPrefix is a prefix which is not legal in user-supplied // label names. const reservedLabelPrefix = "__" @@ -139,6 +165,8 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { + // The call below makes vals escape, copy them to avoid that. + vals := append([]string(nil), vals...) return fmt.Errorf( "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 07bbc9d768..f018e57237 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -92,6 +92,9 @@ type Opts struct { // machine_role metric). See also // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels + + // now is for testing purposes, by default it's time.Now. + now func() time.Time } // BuildFQName joins the given three name components by "_". 
Empty name diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index c0152cdb61..8c1136ceea 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !windows && !js -// +build !windows,!js +//go:build !windows && !js && !wasip1 +// +build !windows,!js,!wasip1 package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go new file mode 100644 index 0000000000..d8d9a6d7a2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go @@ -0,0 +1,26 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build wasip1 +// +build wasip1 + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (*processCollector) processCollect(chan<- Metric) { + // noop on this platform + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go index fa90115921..58f96599f1 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go @@ -17,7 +17,7 @@ // constructors register the Collectors with a registry before returning them. // There are two sets of constructors. The constructors in the first set are // top-level functions, while the constructors in the other set are methods of -// the Factory type. The top-level function return Collectors registered with +// the Factory type. The top-level functions return Collectors registered with // the global registry (prometheus.DefaultRegisterer), while the methods return // Collectors registered with the registry the Factory was constructed with. All // constructors panic if the registration fails. @@ -85,7 +85,7 @@ // } // // A Factory is created with the With(prometheus.Registerer) function, which -// enables two usage pattern. With(prometheus.Registerer) can be called once per +// enables two usage patterns. With(prometheus.Registerer) can be called once per // line: // // var ( @@ -153,7 +153,7 @@ // importing a package. // // A separate package allows conservative users to entirely ignore it. 
And -// whoever wants to use it, will do so explicitly, with an opportunity to read +// whoever wants to use it will do so explicitly, with an opportunity to read // this warning. // // Enjoy promauto responsibly! diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index 3793036ad0..356edb7868 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -389,15 +389,12 @@ func isLabelCurried(c prometheus.Collector, label string) bool { return true } -// emptyLabels is a one-time allocation for non-partitioned metrics to avoid -// unnecessary allocations on each request. -var emptyLabels = prometheus.Labels{} - func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { + labels := prometheus.Labels{} + if !(code || method) { - return emptyLabels + return labels } - labels := prometheus.Labels{} if code { labels["code"] = sanitizeCode(status) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 44da9433be..5e2ced25a0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -548,7 +548,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { goroutineBudget-- runtime.Gosched() } - // Once both checkedMetricChan and uncheckdMetricChan are closed + // Once both checkedMetricChan and uncheckedMetricChan are closed // and drained, the contraption above will nil out cmc and umc, // and then we can leave the collect loop here. if cmc == nil && umc == nil { @@ -963,9 +963,9 @@ func checkDescConsistency( // Is the desc consistent with the content of the metric? lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) copy(lpsFromDesc, desc.constLabelPairs) - for _, l := range desc.variableLabels { + for _, l := range desc.variableLabels.names { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l.Name), + Name: proto.String(l), }) } if len(lpsFromDesc) != len(dtoMetric.Label) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index dd359264e5..1462704446 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -26,6 +26,7 @@ import ( "github.com/beorn7/perks/quantile" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // quantileLabel is used for the label that defines the quantile in a @@ -145,6 +146,9 @@ type SummaryOpts struct { // is the internal buffer size of the underlying package // "github.com/bmizerany/perks/quantile"). BufCap uint32 + + // now is for testing purposes, by default it's time.Now. + now func() time.Time } // SummaryVecOpts bundles the options to create a SummaryVec metric. 
@@ -154,7 +158,7 @@ type SummaryVecOpts struct { SummaryOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. VariableLabels ConstrainableLabels } @@ -188,12 +192,12 @@ func NewSummary(opts SummaryOpts) Summary { } func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) + if len(desc.variableLabels.names) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) } - for _, n := range desc.variableLabels { - if n.Name == quantileLabel { + for _, n := range desc.variableLabels.names { + if n == quantileLabel { panic(errQuantileLabelNotAllowed) } } @@ -222,6 +226,9 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } + if opts.now == nil { + opts.now = time.Now + } if len(opts.Objectives) == 0 { // Use the lock-free implementation of a Summary without objectives. s := &noObjectivesSummary{ @@ -230,6 +237,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { counts: [2]*summaryCounts{{}, {}}, } s.init(s) // Init self-collection. + s.createdTs = timestamppb.New(opts.now()) return s } @@ -245,7 +253,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { coldBuf: make([]float64, 0, opts.BufCap), streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), } - s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.headStreamExpTime = opts.now().Add(s.streamDuration) s.hotBufExpTime = s.headStreamExpTime for i := uint32(0); i < opts.AgeBuckets; i++ { @@ -259,6 +267,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { sort.Float64s(s.sortedObjectives) s.init(s) // Init self-collection. 
+ s.createdTs = timestamppb.New(opts.now()) return s } @@ -286,6 +295,8 @@ type summary struct { headStream *quantile.Stream headStreamIdx int headStreamExpTime, hotBufExpTime time.Time + + createdTs *timestamppb.Timestamp } func (s *summary) Desc() *Desc { @@ -307,7 +318,9 @@ func (s *summary) Observe(v float64) { } func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} + sum := &dto.Summary{ + CreatedTimestamp: s.createdTs, + } qs := make([]*dto.Quantile, 0, len(s.objectives)) s.bufMtx.Lock() @@ -440,6 +453,8 @@ type noObjectivesSummary struct { counts [2]*summaryCounts labelPairs []*dto.LabelPair + + createdTs *timestamppb.Timestamp } func (s *noObjectivesSummary) Desc() *Desc { @@ -490,8 +505,9 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error { } sum := &dto.Summary{ - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + CreatedTimestamp: s.createdTs, } out.Summary = sum @@ -681,6 +697,7 @@ type constSummary struct { sum float64 quantiles map[float64]float64 labelPairs []*dto.LabelPair + createdTs *timestamppb.Timestamp } func (s *constSummary) Desc() *Desc { @@ -688,7 +705,9 @@ func (s *constSummary) Desc() *Desc { } func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} + sum := &dto.Summary{ + CreatedTimestamp: s.createdTs, + } qs := make([]*dto.Quantile, 0, len(s.quantiles)) sum.SampleCount = proto.Uint64(s.count) @@ -737,7 +756,7 @@ func NewConstSummary( if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } return &constSummary{ diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 5f6bb80014..cc23011fad 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -14,6 +14,7 @@ package prometheus import ( + "errors" "fmt" "sort" "time" @@ -91,7 +92,7 @@ func (v *valueFunc) Desc() *Desc { } func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) + return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil) } // NewConstMetric returns a metric with one fixed value that cannot be @@ -105,12 +106,12 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } metric := &dto.Metric{} - if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil { return nil, err } @@ -130,6 +131,43 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal return m } +// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters +// with created timestamp set and returns an 
error for other metric types. +func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + switch valueType { + case CounterValue: + break + default: + return nil, errors.New("created timestamps are only supported for counters") + } + + metric := &dto.Metric{} + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil { + return nil, err + } + + return &constMetric{ + desc: desc, + metric: metric, + }, nil +} + +// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where +// NewConstMetricWithCreatedTimestamp would have returned an error. +func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric { + m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} + type constMetric struct { desc *Desc metric *dto.Metric @@ -153,11 +191,12 @@ func populateMetric( labelPairs []*dto.LabelPair, e *dto.Exemplar, m *dto.Metric, + ct *timestamppb.Timestamp, ) error { m.Label = labelPairs switch t { case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} + m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct} case GaugeValue: m.Gauge = &dto.Gauge{Value: proto.Float64(v)} case UntypedValue: @@ -176,19 +215,19 @@ func populateMetric( // This function is only needed for custom Metric implementations. See MetricVec // example. func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs) if totalLen == 0 { // Super fast path. return nil } - if len(desc.variableLabels) == 0 { + if len(desc.variableLabels.names) == 0 { // Moderately fast path. return desc.constLabelPairs } labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, l := range desc.variableLabels { + for i, l := range desc.variableLabels.names { labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(l.Name), + Name: proto.String(l), Value: proto.String(labelValues[i]), }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index f0d0015a0f..955cfd59f8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -20,24 +20,6 @@ import ( "github.com/prometheus/common/model" ) -var labelsPool = &sync.Pool{ - New: func() interface{} { - return make(Labels) - }, -} - -func getLabelsFromPool() Labels { - return labelsPool.Get().(Labels) -} - -func putLabelsToPool(labels Labels) { - for k := range labels { - delete(labels, k) - } - - labelsPool.Put(labels) -} - // MetricVec is a Collector to bundle metrics of the same name that differ in // their label values. 
MetricVec is not used directly but as a building block // for implementations of vectors of a given metric type, like GaugeVec, @@ -91,6 +73,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { // See also the CounterVec example. func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { lvs = constrainLabelValues(m.desc, lvs, m.curry) + h, err := m.hashLabelValues(lvs) if err != nil { return false @@ -110,8 +93,8 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. func (m *MetricVec) Delete(labels Labels) bool { - labels = constrainLabels(m.desc, labels) - defer putLabelsToPool(labels) + labels, closer := constrainLabels(m.desc, labels) + defer closer() h, err := m.hashLabels(labels) if err != nil { @@ -128,8 +111,8 @@ func (m *MetricVec) Delete(labels Labels) bool { // Note that curried labels will never be matched if deleting from the curried vector. // To match curried labels with DeletePartialMatch, it must be called on the base vector. func (m *MetricVec) DeletePartialMatch(labels Labels) int { - labels = constrainLabels(m.desc, labels) - defer putLabelsToPool(labels) + labels, closer := constrainLabels(m.desc, labels) + defer closer() return m.metricMap.deleteByLabels(labels, m.curry) } @@ -169,11 +152,11 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { oldCurry = m.curry iCurry int ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label.Name] + for i, labelName := range m.desc.variableLabels.names { + val, ok := labels[labelName] if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if ok { - return nil, fmt.Errorf("label name %q is already curried", label.Name) + return nil, fmt.Errorf("label name %q is already curried", labelName) } newCurry = append(newCurry, oldCurry[iCurry]) iCurry++ @@ -181,7 +164,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { if !ok { continue // Label stays uncurried. } - newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)}) + newCurry = append(newCurry, curriedLabelValue{ + i, + m.desc.variableLabels.constrain(labelName, val), + }) } } if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { @@ -250,8 +236,8 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { // around MetricVec, implementing a vector for a specific Metric implementation, // for example GaugeVec. 
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - labels = constrainLabels(m.desc, labels) - defer putLabelsToPool(labels) + labels, closer := constrainLabels(m.desc, labels) + defer closer() h, err := m.hashLabels(labels) if err != nil { @@ -262,7 +248,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { } func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { return 0, err } @@ -271,7 +257,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { curry = m.curry iVals, iCurry int ) - for i := 0; i < len(m.desc.variableLabels); i++ { + for i := 0; i < len(m.desc.variableLabels.names); i++ { if iCurry < len(curry) && curry[iCurry].index == i { h = m.hashAdd(h, curry[iCurry].value) iCurry++ @@ -285,7 +271,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { } func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { return 0, err } @@ -294,17 +280,17 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { curry = m.curry iCurry int ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label.Name] + for i, labelName := range m.desc.variableLabels.names { + val, ok := labels[labelName] if iCurry < len(curry) && curry[iCurry].index == i { if ok { - return 0, fmt.Errorf("label name %q is already curried", label.Name) + return 0, fmt.Errorf("label name %q is already curried", labelName) } h = m.hashAdd(h, curry[iCurry].value) iCurry++ } else { if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label.Name) + return 0, fmt.Errorf("label name %q missing in label map", labelName) } h = m.hashAdd(h, val) } @@ -482,7 +468,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values [] func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { for l, v := range labels { // Check if the target label exists in our metrics and get the index. - varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames()) + varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names) if validLabel { // Check the value of that label against the target value. // We don't consider curried values in partial matches. 
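The vec.go hunks above route every label value through the compiled `labelConstraints` map. From the caller's side, a constraint is attached through the experimental V2 constructors that this diff also touches; a sketch assuming that API (the metric name is made up):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Constraint is a LabelConstraint (func(string) string); compile() stores
	// it in labelConstraints, and constrainLabelValues applies it before hashing.
	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "demo_http_requests_total", // hypothetical metric name
			Help: "HTTP requests partitioned by normalized method.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToLower},
		},
	})

	// Both calls are normalized to method="get" and land on the same child.
	requests.WithLabelValues("GET").Inc()
	requests.WithLabelValues("get").Inc()

	fmt.Println("two increments, one series")
}
```

Vectors built without constraints take the fast paths added in these hunks and skip the label pool entirely.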
@@ -626,7 +612,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe return false } iCurry := 0 - for i, k := range desc.variableLabels { + for i, k := range desc.variableLabels.names { if iCurry < len(curry) && curry[iCurry].index == i { if values[i] != curry[iCurry].value { return false @@ -634,7 +620,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe iCurry++ continue } - if values[i] != labels[k.Name] { + if values[i] != labels[k] { return false } } @@ -644,13 +630,13 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { labelValues := make([]string, len(labels)+len(curry)) iCurry := 0 - for i, k := range desc.variableLabels { + for i, k := range desc.variableLabels.names { if iCurry < len(curry) && curry[iCurry].index == i { labelValues[i] = curry[iCurry].value iCurry++ continue } - labelValues[i] = labels[k.Name] + labelValues[i] = labels[k] } return labelValues } @@ -670,20 +656,37 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { return labelValues } -func constrainLabels(desc *Desc, labels Labels) Labels { - constrainedLabels := getLabelsFromPool() - for l, v := range labels { - if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok { - v = desc.variableLabels[i].Constrain(v) - } +var labelsPool = &sync.Pool{ + New: func() interface{} { + return make(Labels) + }, +} - constrainedLabels[l] = v +func constrainLabels(desc *Desc, labels Labels) (Labels, func()) { + if len(desc.variableLabels.labelConstraints) == 0 { + // Fast path when there's no constraints + return labels, func() {} } - return constrainedLabels + constrainedLabels := labelsPool.Get().(Labels) + for l, v := range labels { + constrainedLabels[l] = desc.variableLabels.constrain(l, v) + } + + return constrainedLabels, func() { + for k := range constrainedLabels { + delete(constrainedLabels, k) + } + labelsPool.Put(constrainedLabels) + } } func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { + if len(desc.variableLabels.labelConstraints) == 0 { + // Fast path when there's no constraints + return lvs + } + constrainedValues := make([]string, len(lvs)) var iCurry, iLVs int for i := 0; i < len(lvs)+len(curry); i++ { @@ -692,8 +695,11 @@ func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) [ continue } - if i < len(desc.variableLabels) { - constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs]) + if i < len(desc.variableLabels.names) { + constrainedValues[iLVs] = desc.variableLabels.constrain( + desc.variableLabels.names[i], + lvs[iLVs], + ) } else { constrainedValues[iLVs] = lvs[iLVs] } diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 84946b2703..cee360db7f 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -474,6 +474,9 @@ type Histogram struct { NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). 
NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 9063978151..0ca86a3dc7 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -22,7 +22,7 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" "github.com/prometheus/common/model" ) diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 7f611ffaad..ca21406000 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,7 +18,7 @@ import ( "io" "net/http" - "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" "google.golang.org/protobuf/encoding/prototext" diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 0ce7ea4612..062a281856 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.53.3 +GOLANGCI_LINT_VERSION ?= v1.54.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 13d74e3957..134767d69a 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build netbsd || openbsd || solaris || windows || nostatfs -// +build netbsd openbsd solaris windows nostatfs +//go:build !freebsd && !linux +// +build !freebsd,!linux package procfs diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go index bee151445a..80df79c319 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs -// +build !netbsd,!openbsd,!solaris,!windows,!nostatfs +//go:build freebsd || linux +// +build freebsd linux package procfs diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 852c8c4a0e..9d8af6db74 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -44,6 +44,14 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 + + // kernel version >= 4.14 MaxLen + // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 + fieldTransport11RDMAMaxLen = 28 + + // kernel version <= 4.2 MinLen + // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 + fieldTransport11RDMAMinLen = 20 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -233,6 +241,33 @@ type NFSTransportStats struct { // A running counter, incremented on each request as the current size of the // pending queue. CumulativePendingQueue uint64 + + // Stats below only available with stat version 1.1. 
+ // Transport over RDMA + + // accessed when sending a call + ReadChunkCount uint64 + WriteChunkCount uint64 + ReplyChunkCount uint64 + TotalRdmaRequest uint64 + + // rarely accessed error counters + PullupCopyCount uint64 + HardwayRegisterCount uint64 + FailedMarshalCount uint64 + BadReplyCount uint64 + MrsRecovered uint64 + MrsOrphaned uint64 + MrsAllocated uint64 + EmptySendctxQ uint64 + + // accessed when receiving a reply + TotalRdmaReply uint64 + FixupCopyCount uint64 + ReplyWaitsForSend uint64 + LocalInvNeeded uint64 + NomsgCallCount uint64 + BcallCount uint64 } // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice @@ -587,14 +622,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats expectedLength = fieldTransport11TCPLen } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen + } else if protocol == "rdma" { + expectedLength = fieldTransport11RDMAMinLen } else { return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } - if len(ss) != expectedLength { - return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v", ErrFileParse, ss) + if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || + (protocol == "rdma" && len(ss) < expectedLength) { + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q", ErrFileParse, statVersion) + return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay @@ -604,7 +642,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) + // + // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen + ns := make([]uint64, fieldTransport11RDMAMaxLen+3) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -622,9 +662,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // we set them to 0 here. if protocol == "udp" { ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } else if protocol == "tcp" { + ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) + } else if protocol == "rdma" { + ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) 
} return &NFSTransportStats{ + // NFS xprt over tcp or udp Protocol: protocol, Port: ns[0], Bind: ns[1], @@ -636,8 +681,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats BadTransactionIDs: ns[7], CumulativeActiveRequests: ns[8], CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], + + // NFS xprt over tcp or udp + // And statVersion 1.1 + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + + // NFS xprt over rdma + // And stat Version 1.1 + ReadChunkCount: ns[13], + WriteChunkCount: ns[14], + ReplyChunkCount: ns[15], + TotalRdmaRequest: ns[16], + PullupCopyCount: ns[17], + HardwayRegisterCount: ns[18], + FailedMarshalCount: ns[19], + BadReplyCount: ns[20], + MrsRecovered: ns[21], + MrsOrphaned: ns[22], + MrsAllocated: ns[23], + EmptySendctxQ: ns[24], + TotalRdmaReply: ns[25], + FixupCopyCount: ns[26], + ReplyWaitsForSend: ns[27], + LocalInvNeeded: ns[28], + NomsgCallCount: ns[29], + BcallCount: ns[30], }, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index 4b7933e4f9..fa761b3529 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -26,6 +26,7 @@ var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rIno = regexp.MustCompile(`^ino:\s+(\d+)$`) rInotify = regexp.MustCompile(`^inotify`) rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) ) @@ -40,6 +41,8 @@ type ProcFDInfo struct { Flags string // Mount point ID MntID string + // Inode number + Ino string // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) InotifyInfos []InotifyInfo } @@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { return nil, err } - var text, pos, flags, mntid string + var text, pos, flags, mntid, ino string var inotify []InotifyInfo scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { flags = rFlags.FindStringSubmatch(text)[1] } else if rMntID.MatchString(text) { mntid = rMntID.FindStringSubmatch(text)[1] + } else if rIno.MatchString(text) { + ino = rIno.FindStringSubmatch(text)[1] } else if rInotify.MatchString(text) { newInotify, err := parseInotifyInfo(text) if err != nil { @@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { Pos: pos, Flags: flags, MntID: mntid, + Ino: ino, InotifyInfos: inotify, } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index 727549a13f..7e75c286b5 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -63,17 +63,17 @@ type ProcMap struct { // parseDevice parses the device token of a line and converts it to a dev_t // (mkdev) like structure. 
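For context on the mountstats change above: with procfs v0.12.0 the RDMA counters parsed from `/proc/[pid]/mountstats` are surfaced on `NFSTransportStats`, zero-padded for tcp/udp transports. A minimal sketch of reading them through the public procfs API (the presence of an NFS-over-RDMA mount is an illustrative assumption):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Self returns a Proc for the current process; MountStats parses
	// /proc/self/mountstats, one entry per mount.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// Only NFS mounts carry the extended per-transport stats.
		s, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue
		}
		t := s.Transport
		// For tcp/udp transports the RDMA-only counters stay zero,
		// because the parser pads the field slice to the RDMA max length.
		fmt.Printf("%s proto=%s rdma_req=%d rdma_reply=%d\n",
			m.Mount, t.Protocol, t.TotalRdmaRequest, t.TotalRdmaReply)
	}
}
```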
func parseDevice(s string) (uint64, error) { - toks := strings.Split(s, ":") - if len(toks) < 2 { - return 0, fmt.Errorf("%w: unexpected number of fields, expected: 2, got: %q", ErrFileParse, len(toks)) + i := strings.Index(s, ":") + if i == -1 { + return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s) } - major, err := strconv.ParseUint(toks[0], 16, 0) + major, err := strconv.ParseUint(s[0:i], 16, 0) if err != nil { return 0, err } - minor, err := strconv.ParseUint(toks[1], 16, 0) + minor, err := strconv.ParseUint(s[i+1:], 16, 0) if err != nil { return 0, err } @@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) { // parseAddresses parses the start-end address. func parseAddresses(s string) (uintptr, uintptr, error) { - toks := strings.Split(s, "-") - if len(toks) < 2 { - return 0, 0, fmt.Errorf("%w: invalid address", ErrFileParse) + idx := strings.Index(s, "-") + if idx == -1 { + return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s) } - saddr, err := parseAddress(toks[0]) + saddr, err := parseAddress(s[0:idx]) if err != nil { return 0, 0, err } - eaddr, err := parseAddress(toks[1]) + eaddr, err := parseAddress(s[idx+1:]) if err != nil { return 0, 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index c055d075db..46307f5721 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -23,7 +23,7 @@ import ( ) // ProcStatus provides status information about the process, -// read from /proc/[pid]/stat. +// read from /proc/[pid]/status. type ProcStatus struct { // The process ID. PID int @@ -32,6 +32,8 @@ type ProcStatus struct { // Thread group ID. TGID int + // List of Pid namespace. + NSpids []uint64 // Peak virtual memory size. VmPeak uint64 // nolint:revive @@ -127,6 +129,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt copy(s.UIDs[:], strings.Split(vString, "\t")) case "Gid": copy(s.GIDs[:], strings.Split(vString, "\t")) + case "NSpid": + s.NSpids = calcNSPidsList(vString) case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -200,3 +204,18 @@ func calcCpusAllowedList(cpuString string) []uint64 { sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) return g } + +func calcNSPidsList(nspidsString string) []uint64 { + s := strings.Split(nspidsString, " ") + var nspids []uint64 + + for _, nspid := range s { + nspid, _ := strconv.ParseUint(nspid, 10, 64) + if nspid == 0 { + continue + } + nspids = append(nspids, nspid) + } + + return nspids +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index fa015e9ac8..3b487a9362 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -125,27 +125,13 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { } } -type streamEventType int - -type streamEvent struct { - Type streamEventType - Err error -} - -const ( - receiveEndEvent streamEventType = iota - errorEvent -) - // clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and // SendMsg method call. 
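The `NSpid` addition above records the process ID as seen from each nested PID namespace. A short sketch using `Proc.NewStatus`, which parses `/proc/[pid]/status` and fills `NSpids` via `calcNSPidsList` (Linux-only):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	// NewStatus parses /proc/self/status; the NSpid row is converted
	// into one uint64 per enclosing PID namespace.
	status, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("PID:", status.PID, "NSpids:", status.NSpids)
}
```

For a containerized process this typically yields two entries: the host PID first, then the PID inside the container's namespace.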
type clientStream struct { grpc.ClientStream + desc *grpc.StreamDesc - desc *grpc.StreamDesc - events chan streamEvent - eventsDone chan struct{} - finished chan error + span trace.Span receivedEvent bool sentEvent bool @@ -160,11 +146,11 @@ func (w *clientStream) RecvMsg(m interface{}) error { err := w.ClientStream.RecvMsg(m) if err == nil && !w.desc.ServerStreams { - w.sendStreamEvent(receiveEndEvent, nil) + w.endSpan(nil) } else if err == io.EOF { - w.sendStreamEvent(receiveEndEvent, nil) + w.endSpan(nil) } else if err != nil { - w.sendStreamEvent(errorEvent, err) + w.endSpan(err) } else { w.receivedMessageID++ @@ -186,7 +172,7 @@ func (w *clientStream) SendMsg(m interface{}) error { } if err != nil { - w.sendStreamEvent(errorEvent, err) + w.endSpan(err) } return err @@ -195,7 +181,7 @@ func (w *clientStream) SendMsg(m interface{}) error { func (w *clientStream) Header() (metadata.MD, error) { md, err := w.ClientStream.Header() if err != nil { - w.sendStreamEvent(errorEvent, err) + w.endSpan(err) } return md, err @@ -204,54 +190,32 @@ func (w *clientStream) Header() (metadata.MD, error) { func (w *clientStream) CloseSend() error { err := w.ClientStream.CloseSend() if err != nil { - w.sendStreamEvent(errorEvent, err) + w.endSpan(err) } return err } -func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, cfg *config) *clientStream { - events := make(chan streamEvent) - eventsDone := make(chan struct{}) - finished := make(chan error) - - go func() { - defer close(eventsDone) - - for { - select { - case event := <-events: - switch event.Type { - case receiveEndEvent: - finished <- nil - return - case errorEvent: - finished <- event.Err - return - } - case <-ctx.Done(): - finished <- ctx.Err() - return - } - } - }() - +func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { return &clientStream{ ClientStream: s, + span: span, desc: desc, - events: events, - eventsDone: eventsDone, - finished: finished, receivedEvent: cfg.ReceivedEvent, sentEvent: cfg.SentEvent, } } -func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) { - select { - case <-w.eventsDone: - case w.events <- streamEvent{Type: eventType, Err: err}: +func (w *clientStream) endSpan(err error) { + if err != nil { + s, _ := status.FromError(err) + w.span.SetStatus(codes.Error, s.Message()) + w.span.SetAttributes(statusCodeAttr(s.Code())) + } else { + w.span.SetAttributes(statusCodeAttr(grpc_codes.OK)) } + + w.span.End() } // StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable @@ -306,22 +270,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { span.End() return s, err } - stream := wrapClientStream(ctx, s, desc, cfg) - - go func() { - err := <-stream.finished - - if err != nil { - s, _ := status.FromError(err) - span.SetStatus(codes.Error, s.Message()) - span.SetAttributes(statusCodeAttr(s.Code())) - } else { - span.SetAttributes(statusCodeAttr(grpc_codes.OK)) - } - - span.End() - }() - + stream := wrapClientStream(ctx, s, desc, span, cfg) return stream, nil } } @@ -391,9 +340,11 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { grpcStatusCodeAttr := statusCodeAttr(s.Code()) span.SetAttributes(grpcStatusCodeAttr) - elapsedTime := time.Since(before).Milliseconds() + // Use floating point division here for higher precision (instead of Millisecond method). 
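The precision comment above is easy to demonstrate: `Duration.Milliseconds()` truncates to an integer, so sub-millisecond RPCs would all record as zero, whereas floating point division by `time.Millisecond` keeps the fraction for the duration histogram. A standalone illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	d := 1500 * time.Microsecond // a 1.5 ms RPC

	// Integer milliseconds: truncates toward zero.
	fmt.Println(d.Milliseconds()) // prints 1

	// Floating point division, as in the interceptor above:
	// the fractional part survives.
	fmt.Println(float64(d) / float64(time.Millisecond)) // prints 1.5
}
```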
+ elapsedTime := float64(time.Since(before)) / float64(time.Millisecond) + metricAttrs = append(metricAttrs, grpcStatusCodeAttr) - cfg.rpcDuration.Record(ctx, float64(elapsedTime), metric.WithAttributes(metricAttrs...)) + cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) return resp, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index 0211e55e00..e41e6df618 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -194,10 +194,13 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) { span.End() metricAttrs = append(metricAttrs, rpcStatusAttr) - c.rpcDuration.Record(wctx, float64(rs.EndTime.Sub(rs.BeginTime)), metric.WithAttributes(metricAttrs...)) - c.rpcRequestsPerRPC.Record(wctx, gctx.messagesReceived, metric.WithAttributes(metricAttrs...)) - c.rpcResponsesPerRPC.Record(wctx, gctx.messagesSent, metric.WithAttributes(metricAttrs...)) + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) + + c.rpcDuration.Record(wctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + c.rpcRequestsPerRPC.Record(wctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) + c.rpcResponsesPerRPC.Record(wctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) default: return } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index 5c13a7ceab..f47c8a6751 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -16,7 +16,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.46.0" + return "0.46.1" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 9248055655..895c7664be 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -14,13 +14,9 @@ go.work.sum gen/ /example/dice/dice -/example/fib/fib -/example/fib/traces.txt -/example/jaeger/jaeger /example/namedtracer/namedtracer /example/otel-collector/otel-collector /example/opencensus/opencensus /example/passthrough/passthrough /example/prometheus/prometheus -/example/view/view /example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c4e7ad475f..24874f856e 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,20 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.21.0/0.44.0] 2023-11-16 + +### Removed + +- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. 
(#4706) +- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707) +- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708) +- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723) + +### Fixed + +- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719) +- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719) + ## [1.20.0/0.43.0] 2023-11-10 This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. @@ -2721,7 +2735,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.20.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD +[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 [1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 [1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 [1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index a00dbca7b0..850606ae69 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -90,6 +90,10 @@ git push Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull request ID to the entry you added to `CHANGELOG.md`. +Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request. +Rewriting Git history makes it difficult to keep track of iterations during code review. +All pull requests are squashed to a single commit upon merge to `main`. + ### How to Receive Comments * If the PR is not ready for review, please put `[WIP]` in the title, diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 620ea88bf1..8ee285b8d5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -16,5 +16,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.20.0" + return "1.21.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 7048c788e9..422d4c964b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -16,5 +16,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. 
func Version() string { - return "1.20.0" + return "1.21.0" } diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 5a92f1d4b6..e2f743585d 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.20.0" + return "1.21.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 82366e7998..3c153c9d6f 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,13 +14,12 @@ module-sets: stable-v1: - version: v1.20.0 + version: v1.21.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/fib - go.opentelemetry.io/otel/example/namedtracer - go.opentelemetry.io/otel/example/otel-collector - go.opentelemetry.io/otel/example/passthrough @@ -35,14 +34,12 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.43.0 + version: v0.44.0 modules: - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus - - go.opentelemetry.io/otel/example/view - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/prometheus diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index 693a1b1aba..f39dd00d99 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -15,6 +15,7 @@ import ( "github.com/google/uuid" "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/callctx" ) // Use this error type to return an error which allows introspection of both @@ -43,6 +44,16 @@ func (e wrappedCallErr) Is(target error) bool { // req.WithContext, then calls any functions returned by the hooks in // reverse order. func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Add headers set in context metadata. + if ctx != nil { + headers := callctx.HeadersFromContext(ctx) + for k, vals := range headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + } + // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. if _, ok := req.Header["Accept-Encoding"]; ok { @@ -77,6 +88,16 @@ func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Re // req.WithContext, then calls any functions returned by the hooks in // reverse order. func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) { + // Add headers set in context metadata. 
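The `callctx` hook added above lets callers attach extra HTTP headers via the request context; `SendRequest` and `SendRequestWithRetry` then copy them onto the outgoing `*http.Request`. A minimal sketch with the public gax-go API (the header name is illustrative, not one the library requires):

```go
package main

import (
	"context"
	"fmt"

	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	// SetHeaders stores key/value pairs on the context; gensupport's
	// send path reads them back with HeadersFromContext and calls
	// req.Header.Add for every value before dispatching the request.
	ctx := callctx.SetHeaders(context.Background(),
		"x-example-trace", "abc123") // illustrative header name

	for k, vals := range callctx.HeadersFromContext(ctx) {
		fmt.Println(k, vals)
	}
}
```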
+ if ctx != nil { + headers := callctx.HeadersFromContext(ctx) + for k, vals := range headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + } + // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. if _, ok := req.Header["Accept-Encoding"]; ok { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 46ad187ec1..7a7266777b 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.126.0" +const Version = "0.132.0" diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index edebc73ad4..6212071181 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"34333739363230323936363635393736363430\"", + "etag": "\"39353535313838393033333032363632303533\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -1311,7 +1311,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1357,7 +1357,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1399,7 +1399,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1444,7 +1444,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1493,7 +1493,7 @@ "type": "string" }, "object": { - "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1545,7 +1545,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1588,7 +1588,7 @@ "type": "string" }, "destinationObject": { - "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the new object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1662,7 +1662,7 @@ ], "parameters": { "destinationBucket": { - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1773,7 +1773,7 @@ "type": "string" }, "sourceObject": { - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1843,7 +1843,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1907,7 +1907,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1967,7 +1967,7 @@ "type": "string" }, "object": { - "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2054,7 +2054,7 @@ "type": "string" }, "name": { - "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "query", "type": "string" }, @@ -2252,7 +2252,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2332,7 +2332,7 @@ "type": "string" }, "destinationObject": { - "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2443,7 +2443,7 @@ "type": "string" }, "sourceObject": { - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2489,7 +2489,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2536,7 +2536,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2612,7 +2612,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -3010,7 +3010,7 @@ } } }, - "revision": "20230301", + "revision": "20230710", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index e11bf2e6d3..69a6e41e7c 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -7260,7 +7260,8 @@ type ObjectAccessControlsDeleteCall struct { // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7375,7 +7376,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -7416,7 +7417,8 @@ type ObjectAccessControlsGetCall struct { // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7569,7 +7571,7 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -7608,7 +7610,8 @@ type ObjectAccessControlsInsertCall struct { // // - bucket: Name of a bucket. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7745,7 +7748,7 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -7787,7 +7790,8 @@ type ObjectAccessControlsListCall struct { // // - bucket: Name of a bucket. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7931,7 +7935,7 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -7974,7 +7978,8 @@ type ObjectAccessControlsPatchCall struct { // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8120,7 +8125,7 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec // "type": "string" // }, // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -8166,7 +8171,8 @@ type ObjectAccessControlsUpdateCall struct { // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8312,7 +8318,7 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -8357,7 +8363,8 @@ type ObjectsComposeCall struct { // objects. The destination object is stored in this bucket. // - destinationObject: Name of the new object. For information about // how to URL encode object names to be path safe, see Encoding URI -// Path Parts. +// Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.destinationBucket = destinationBucket @@ -8540,7 +8547,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "destinationObject": { - // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -8625,7 +8632,8 @@ type ObjectsCopyCall struct { // - destinationBucket: Name of the bucket in which to store the new // object. Overrides the provided object metadata's bucket value, if // any.For information about how to URL encode object names to be path -// safe, see Encoding URI Path Parts. +// safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). // - destinationObject: Name of the new object. Required when the object // metadata is not otherwise provided. Overrides the object metadata's // name value, if any. @@ -8633,7 +8641,8 @@ type ObjectsCopyCall struct { // object. // - sourceObject: Name of the source object. 
For information about how // to URL encode object names to be path safe, see Encoding URI Path -// Parts. +// Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -8894,7 +8903,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { // ], // "parameters": { // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9005,7 +9014,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9049,7 +9058,8 @@ type ObjectsDeleteCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9215,7 +9225,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9252,7 +9262,8 @@ type ObjectsGetCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9484,7 +9495,7 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9541,7 +9552,8 @@ type ObjectsGetIamPolicyCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9685,7 +9697,7 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9797,7 +9809,8 @@ func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { // Name sets the optional parameter "name": Name of the object. Required // when the object metadata is not otherwise provided. Overrides the // object metadata's name value, if any. For information about how to -// URL encode object names to be path safe, see Encoding URI Path Parts. +// URL encode object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { c.urlParams_.Set("name", name) return c @@ -10107,7 +10120,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "name": { - // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "query", // "type": "string" // }, @@ -10517,7 +10530,8 @@ type ObjectsPatchCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. 
+// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -10756,7 +10770,7 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -10839,12 +10853,14 @@ type ObjectsRewriteCall struct { // - destinationObject: Name of the new object. Required when the object // metadata is not otherwise provided. Overrides the object metadata's // name value, if any. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts. +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). // - sourceBucket: Name of the bucket in which to find the source // object. // - sourceObject: Name of the source object. For information about how // to URL encode object names to be path safe, see Encoding URI Path -// Parts. +// Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -11140,7 +11156,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // "type": "string" // }, // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -11251,7 +11267,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // "type": "string" // }, // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -11294,7 +11310,8 @@ type ObjectsSetIamPolicyCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. 
For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11431,7 +11448,7 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -11475,7 +11492,8 @@ type ObjectsTestIamPermissionsCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). // - permissions: Permissions to test. func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -11622,7 +11640,7 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -11671,7 +11689,8 @@ type ObjectsUpdateCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11910,7 +11929,7 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" diff --git a/vendor/modules.txt b/vendor/modules.txt index 2e0023cad6..3a6c663c5c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -111,8 +111,8 @@ github.com/apparentlymart/go-textseg/textseg # github.com/apparentlymart/go-textseg/v13 v13.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v13/textseg -# github.com/argoproj/argo-cd/v2 v2.9.21 -## explicit; go 1.19 +# github.com/argoproj/argo-cd/v2 v2.12.10 +## explicit; go 1.21.0 github.com/argoproj/argo-cd/v2/common github.com/argoproj/argo-cd/v2/pkg/apiclient/account github.com/argoproj/argo-cd/v2/pkg/apiclient/application @@ -138,6 +138,7 @@ github.com/argoproj/argo-cd/v2/util/git github.com/argoproj/argo-cd/v2/util/glob github.com/argoproj/argo-cd/v2/util/grpc github.com/argoproj/argo-cd/v2/util/helm +github.com/argoproj/argo-cd/v2/util/http github.com/argoproj/argo-cd/v2/util/io github.com/argoproj/argo-cd/v2/util/io/files github.com/argoproj/argo-cd/v2/util/io/path @@ -145,10 +146,11 @@ github.com/argoproj/argo-cd/v2/util/kube github.com/argoproj/argo-cd/v2/util/log github.com/argoproj/argo-cd/v2/util/password github.com/argoproj/argo-cd/v2/util/proxy +github.com/argoproj/argo-cd/v2/util/regex github.com/argoproj/argo-cd/v2/util/security github.com/argoproj/argo-cd/v2/util/settings github.com/argoproj/argo-cd/v2/util/tls -# github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.13 +# github.com/argoproj/argo-workflows/v3 v3.5.13 => github.com/devtron-labs/argo-workflows/v3 v3.5.13 ## explicit; go 1.21 github.com/argoproj/argo-workflows/v3/errors github.com/argoproj/argo-workflows/v3/pkg/apis/workflow @@ -164,7 +166,7 @@ github.com/argoproj/argo-workflows/v3/util/slice github.com/argoproj/argo-workflows/v3/util/wait github.com/argoproj/argo-workflows/v3/workflow/common github.com/argoproj/argo-workflows/v3/workflow/util -# github.com/argoproj/gitops-engine v0.7.1-0.20240718175351-6b2984ebc470 +# github.com/argoproj/gitops-engine v0.7.1-0.20250129155113-faf5a4e5c37d ## explicit; go 1.21 github.com/argoproj/gitops-engine/internal/kubernetes_vendor/pkg/api/v1/endpoints github.com/argoproj/gitops-engine/internal/kubernetes_vendor/pkg/util/hash @@ -187,8 +189,8 @@ github.com/argoproj/pkg/grpc/http github.com/argoproj/pkg/rand github.com/argoproj/pkg/sync github.com/argoproj/pkg/time -# github.com/aws/aws-sdk-go v1.44.317 -## explicit; go 1.11 +# github.com/aws/aws-sdk-go v1.50.8 +## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn 
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2e0023cad6..3a6c663c5c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -111,8 +111,8 @@ github.com/apparentlymart/go-textseg/textseg
 # github.com/apparentlymart/go-textseg/v13 v13.0.0
 ## explicit; go 1.16
 github.com/apparentlymart/go-textseg/v13/textseg
-# github.com/argoproj/argo-cd/v2 v2.9.21
-## explicit; go 1.19
+# github.com/argoproj/argo-cd/v2 v2.12.10
+## explicit; go 1.21.0
 github.com/argoproj/argo-cd/v2/common
 github.com/argoproj/argo-cd/v2/pkg/apiclient/account
 github.com/argoproj/argo-cd/v2/pkg/apiclient/application
@@ -138,6 +138,7 @@ github.com/argoproj/argo-cd/v2/util/git
 github.com/argoproj/argo-cd/v2/util/glob
 github.com/argoproj/argo-cd/v2/util/grpc
 github.com/argoproj/argo-cd/v2/util/helm
+github.com/argoproj/argo-cd/v2/util/http
 github.com/argoproj/argo-cd/v2/util/io
 github.com/argoproj/argo-cd/v2/util/io/files
 github.com/argoproj/argo-cd/v2/util/io/path
@@ -145,10 +146,11 @@ github.com/argoproj/argo-cd/v2/util/kube
 github.com/argoproj/argo-cd/v2/util/log
 github.com/argoproj/argo-cd/v2/util/password
 github.com/argoproj/argo-cd/v2/util/proxy
+github.com/argoproj/argo-cd/v2/util/regex
 github.com/argoproj/argo-cd/v2/util/security
 github.com/argoproj/argo-cd/v2/util/settings
 github.com/argoproj/argo-cd/v2/util/tls
-# github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.13
+# github.com/argoproj/argo-workflows/v3 v3.5.13 => github.com/devtron-labs/argo-workflows/v3 v3.5.13
 ## explicit; go 1.21
 github.com/argoproj/argo-workflows/v3/errors
 github.com/argoproj/argo-workflows/v3/pkg/apis/workflow
@@ -164,7 +166,7 @@ github.com/argoproj/argo-workflows/v3/util/slice
 github.com/argoproj/argo-workflows/v3/util/wait
 github.com/argoproj/argo-workflows/v3/workflow/common
 github.com/argoproj/argo-workflows/v3/workflow/util
-# github.com/argoproj/gitops-engine v0.7.1-0.20240718175351-6b2984ebc470
+# github.com/argoproj/gitops-engine v0.7.1-0.20250129155113-faf5a4e5c37d
 ## explicit; go 1.21
 github.com/argoproj/gitops-engine/internal/kubernetes_vendor/pkg/api/v1/endpoints
 github.com/argoproj/gitops-engine/internal/kubernetes_vendor/pkg/util/hash
@@ -187,8 +189,8 @@ github.com/argoproj/pkg/grpc/http
 github.com/argoproj/pkg/rand
 github.com/argoproj/pkg/sync
 github.com/argoproj/pkg/time
-# github.com/aws/aws-sdk-go v1.44.317
-## explicit; go 1.11
+# github.com/aws/aws-sdk-go v1.50.8
+## explicit; go 1.19
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
 github.com/aws/aws-sdk-go/aws/auth/bearer
@@ -390,6 +392,10 @@ github.com/dgryski/go-rendezvous
 # github.com/distribution/reference v0.6.0
 ## explicit; go 1.20
 github.com/distribution/reference
+# github.com/dlclark/regexp2 v1.11.2
+## explicit; go 1.13
+github.com/dlclark/regexp2
+github.com/dlclark/regexp2/syntax
 # github.com/docker/cli v24.0.6+incompatible
 ## explicit
 github.com/docker/cli/cli/config/types
@@ -409,7 +415,7 @@ github.com/emirpasic/gods/lists/arraylist
 github.com/emirpasic/gods/trees
 github.com/emirpasic/gods/trees/binaryheap
 github.com/emirpasic/gods/utils
-# github.com/evanphx/json-patch v5.7.0+incompatible
+# github.com/evanphx/json-patch v5.9.0+incompatible
 ## explicit
 github.com/evanphx/json-patch
 # github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d
@@ -671,15 +677,16 @@ github.com/google/uuid
 # github.com/google/wire v0.6.0
 ## explicit; go 1.12
 github.com/google/wire
-# github.com/googleapis/enterprise-certificate-proxy v0.2.3
+# github.com/googleapis/enterprise-certificate-proxy v0.2.5
 ## explicit; go 1.19
 github.com/googleapis/enterprise-certificate-proxy/client
 github.com/googleapis/enterprise-certificate-proxy/client/util
-# github.com/googleapis/gax-go/v2 v2.11.0
+# github.com/googleapis/gax-go/v2 v2.12.0
 ## explicit; go 1.19
 github.com/googleapis/gax-go/v2
 github.com/googleapis/gax-go/v2/apierror
 github.com/googleapis/gax-go/v2/apierror/internal/proto
+github.com/googleapis/gax-go/v2/callctx
 github.com/googleapis/gax-go/v2/internal
 # github.com/gorilla/mux v1.8.0
 ## explicit; go 1.12
@@ -804,9 +811,9 @@ github.com/mailru/easyjson/jwriter
 # github.com/mattn/go-ieproxy v0.0.1
 ## explicit; go 1.14
 github.com/mattn/go-ieproxy
-# github.com/matttproud/golang_protobuf_extensions v1.0.4
-## explicit; go 1.9
-github.com/matttproud/golang_protobuf_extensions/pbutil
+# github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0
+## explicit; go 1.19
+github.com/matttproud/golang_protobuf_extensions/v2/pbutil
 # github.com/microsoft/azure-devops-go-api/azuredevops v1.0.0-b5
 ## explicit; go 1.12
 github.com/microsoft/azure-devops-go-api/azuredevops
@@ -899,8 +906,8 @@ github.com/pmezard/go-difflib/difflib
 # github.com/posthog/posthog-go v0.0.0-20210610161230-cd4408afb35a
 ## explicit; go 1.15
 github.com/posthog/posthog-go
-# github.com/prometheus/client_golang v1.16.0
-## explicit; go 1.17
+# github.com/prometheus/client_golang v1.18.0
+## explicit; go 1.19
 github.com/prometheus/client_golang/api
 github.com/prometheus/client_golang/api/prometheus/v1
 github.com/prometheus/client_golang/prometheus
@@ -908,15 +915,15 @@ github.com/prometheus/client_golang/prometheus/collectors
 github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promauto
 github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16
-## explicit; go 1.18
+# github.com/prometheus/client_model v0.5.0
+## explicit; go 1.19
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.44.0
-## explicit; go 1.18
+# github.com/prometheus/common v0.45.0
+## explicit; go 1.20
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.11.1
+# github.com/prometheus/procfs v0.12.0
 ## explicit; go 1.19
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
@@ -1064,7 +1071,7 @@ go.opencensus.io/trace/tracestate
 ## explicit; go 1.19
 go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux
 go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux/internal/semconvutil
-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0
+# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
 ## explicit; go 1.20
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
@@ -1072,7 +1079,7 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/inte
 ## explicit; go 1.19
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.20.0
+# go.opentelemetry.io/otel v1.21.0
 ## explicit; go 1.20
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
@@ -1087,22 +1094,22 @@ go.opentelemetry.io/otel/semconv/internal
 go.opentelemetry.io/otel/semconv/v1.17.0
 go.opentelemetry.io/otel/semconv/v1.21.0
 go.opentelemetry.io/otel/semconv/v1.4.0
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0
 ## explicit; go 1.20
 go.opentelemetry.io/otel/exporters/otlp/otlptrace
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
 ## explicit; go 1.20
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/metric v1.20.0
+# go.opentelemetry.io/otel/metric v1.21.0
 ## explicit; go 1.20
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
-# go.opentelemetry.io/otel/sdk v1.20.0
+# go.opentelemetry.io/otel/sdk v1.21.0
 ## explicit; go 1.20
 go.opentelemetry.io/otel/sdk
 go.opentelemetry.io/otel/sdk/instrumentation
@@ -1110,7 +1117,7 @@ go.opentelemetry.io/otel/sdk/internal
 go.opentelemetry.io/otel/sdk/internal/env
 go.opentelemetry.io/otel/sdk/resource
 go.opentelemetry.io/otel/sdk/trace
-# go.opentelemetry.io/otel/trace v1.20.0
+# go.opentelemetry.io/otel/trace v1.21.0
 ## explicit; go 1.20
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
@@ -1270,7 +1277,7 @@ golang.org/x/tools/internal/versions
 ## explicit; go 1.17
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
-# google.golang.org/api v0.126.0
+# google.golang.org/api v0.132.0
 ## explicit; go 1.19
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport
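The wire_gen.go hunks that follow are generated code: Google Wire (pinned above at github.com/google/wire v0.6.0) turns the provider and wire.Bind declarations in Wire.go into one long chain of constructor calls, so most of this diff is mechanical — renumbered readN import aliases and re-threaded constructor arguments rather than hand-edited logic. A minimal, hypothetical sketch of the pattern (Repo and Service are illustrative names, not types from this repo):

	//go:build wireinject

	package demo

	import "github.com/google/wire"

	type Repo struct{}

	func NewRepo() *Repo { return &Repo{} }

	type Service struct{ repo *Repo }

	func NewService(r *Repo) *Service { return &Service{repo: r} }

	// InitializeService is the injector declaration. Running `wire` replaces
	// it in the generated wire_gen.go with straight-line constructor calls:
	//
	//	repo := NewRepo()
	//	service := NewService(repo)
	//	return service
	func InitializeService() *Service {
		wire.Build(NewRepo, NewService)
		return nil
	}

Because the generated file assigns import aliases in encounter order, adding one provider can renumber every subsequent alias, which accounts for churn like read5 becoming read6 below.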
"github.com/devtron-labs/devtron/pkg/build/artifacts/imageTagging/read" + read17 "github.com/devtron-labs/devtron/pkg/build/artifacts/imageTagging/read" "github.com/devtron-labs/devtron/pkg/build/git/gitHost" - read16 "github.com/devtron-labs/devtron/pkg/build/git/gitHost/read" + read21 "github.com/devtron-labs/devtron/pkg/build/git/gitHost/read" repository26 "github.com/devtron-labs/devtron/pkg/build/git/gitHost/repository" - read11 "github.com/devtron-labs/devtron/pkg/build/git/gitMaterial/read" + read15 "github.com/devtron-labs/devtron/pkg/build/git/gitMaterial/read" repository20 "github.com/devtron-labs/devtron/pkg/build/git/gitMaterial/repository" "github.com/devtron-labs/devtron/pkg/build/git/gitProvider" - read6 "github.com/devtron-labs/devtron/pkg/build/git/gitProvider/read" + read7 "github.com/devtron-labs/devtron/pkg/build/git/gitProvider/read" repository11 "github.com/devtron-labs/devtron/pkg/build/git/gitProvider/repository" "github.com/devtron-labs/devtron/pkg/build/git/gitWebhook" repository23 "github.com/devtron-labs/devtron/pkg/build/git/gitWebhook/repository" pipeline2 "github.com/devtron-labs/devtron/pkg/build/pipeline" - read10 "github.com/devtron-labs/devtron/pkg/build/pipeline/read" + read14 "github.com/devtron-labs/devtron/pkg/build/pipeline/read" service7 "github.com/devtron-labs/devtron/pkg/bulkAction/service" "github.com/devtron-labs/devtron/pkg/chart" "github.com/devtron-labs/devtron/pkg/chart/gitOpsConfig" + read16 "github.com/devtron-labs/devtron/pkg/chart/read" "github.com/devtron-labs/devtron/pkg/chartRepo" "github.com/devtron-labs/devtron/pkg/chartRepo/repository" "github.com/devtron-labs/devtron/pkg/cluster" "github.com/devtron-labs/devtron/pkg/cluster/environment" - read2 "github.com/devtron-labs/devtron/pkg/cluster/environment/read" + read3 "github.com/devtron-labs/devtron/pkg/cluster/environment/read" "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" rbac2 "github.com/devtron-labs/devtron/pkg/cluster/rbac" - "github.com/devtron-labs/devtron/pkg/cluster/read" + read2 "github.com/devtron-labs/devtron/pkg/cluster/read" repository5 "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/clusterTerminalAccess" "github.com/devtron-labs/devtron/pkg/commonService" "github.com/devtron-labs/devtron/pkg/config/configDiff" - read9 "github.com/devtron-labs/devtron/pkg/config/read" + read10 "github.com/devtron-labs/devtron/pkg/config/read" delete2 "github.com/devtron-labs/devtron/pkg/delete" "github.com/devtron-labs/devtron/pkg/deployment/common" + read11 "github.com/devtron-labs/devtron/pkg/deployment/common/read" "github.com/devtron-labs/devtron/pkg/deployment/deployedApp" "github.com/devtron-labs/devtron/pkg/deployment/deployedApp/status/resourceTree" 
"github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" @@ -178,12 +181,13 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/gitOps/validation" "github.com/devtron-labs/devtron/pkg/deployment/manifest" "github.com/devtron-labs/devtron/pkg/deployment/manifest/configMapAndSecret" - read14 "github.com/devtron-labs/devtron/pkg/deployment/manifest/configMapAndSecret/read" + read19 "github.com/devtron-labs/devtron/pkg/deployment/manifest/configMapAndSecret/read" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics" repository16 "github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics/repository" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef" - read7 "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/read" + read12 "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/read" + read8 "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/read" "github.com/devtron-labs/devtron/pkg/deployment/manifest/publish" "github.com/devtron-labs/devtron/pkg/deployment/providerConfig" "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps" @@ -192,7 +196,7 @@ import ( "github.com/devtron-labs/devtron/pkg/deploymentGroup" "github.com/devtron-labs/devtron/pkg/devtronResource" "github.com/devtron-labs/devtron/pkg/devtronResource/history/deployment/cdPipeline" - read8 "github.com/devtron-labs/devtron/pkg/devtronResource/read" + read9 "github.com/devtron-labs/devtron/pkg/devtronResource/read" repository13 "github.com/devtron-labs/devtron/pkg/devtronResource/repository" "github.com/devtron-labs/devtron/pkg/dockerRegistry" "github.com/devtron-labs/devtron/pkg/eventProcessor" @@ -218,6 +222,8 @@ import ( "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs" repository27 "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs/repository" "github.com/devtron-labs/devtron/pkg/module" + bean2 "github.com/devtron-labs/devtron/pkg/module/bean" + "github.com/devtron-labs/devtron/pkg/module/read" "github.com/devtron-labs/devtron/pkg/module/repo" "github.com/devtron-labs/devtron/pkg/module/store" "github.com/devtron-labs/devtron/pkg/notifier" @@ -235,7 +241,7 @@ import ( "github.com/devtron-labs/devtron/pkg/plugin" repository19 "github.com/devtron-labs/devtron/pkg/plugin/repository" "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning" - read13 "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/read" + read18 "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/read" repository24 "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository" 
"github.com/devtron-labs/devtron/pkg/policyGovernance/security/scanTool" repository15 "github.com/devtron-labs/devtron/pkg/policyGovernance/security/scanTool/repository" @@ -246,7 +252,7 @@ import ( "github.com/devtron-labs/devtron/pkg/server/store" "github.com/devtron-labs/devtron/pkg/sql" "github.com/devtron-labs/devtron/pkg/team" - read3 "github.com/devtron-labs/devtron/pkg/team/read" + read4 "github.com/devtron-labs/devtron/pkg/team/read" repository8 "github.com/devtron-labs/devtron/pkg/team/repository" "github.com/devtron-labs/devtron/pkg/terminal" util3 "github.com/devtron-labs/devtron/pkg/util" @@ -255,7 +261,7 @@ import ( repository12 "github.com/devtron-labs/devtron/pkg/variables/repository" "github.com/devtron-labs/devtron/pkg/webhook/helm" "github.com/devtron-labs/devtron/pkg/workflow/cd" - read15 "github.com/devtron-labs/devtron/pkg/workflow/cd/read" + read20 "github.com/devtron-labs/devtron/pkg/workflow/cd/read" "github.com/devtron-labs/devtron/pkg/workflow/dag" status2 "github.com/devtron-labs/devtron/pkg/workflow/status" util2 "github.com/devtron-labs/devtron/util" @@ -333,7 +339,9 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - gitOpsConfigReadServiceImpl := config.NewGitOpsConfigReadServiceImpl(sugaredLogger, gitOpsConfigRepositoryImpl, userServiceImpl, environmentVariables) + moduleRepositoryImpl := moduleRepo.NewModuleRepositoryImpl(db) + moduleReadServiceImpl := read.NewModuleReadServiceImpl(sugaredLogger, moduleRepositoryImpl) + gitOpsConfigReadServiceImpl := config.NewGitOpsConfigReadServiceImpl(sugaredLogger, gitOpsConfigRepositoryImpl, userServiceImpl, environmentVariables, moduleReadServiceImpl) clusterRepositoryImpl := repository5.NewClusterRepositoryImpl(db, sugaredLogger) k8sRuntimeConfig, err := k8s.GetRuntimeConfig() if err != nil { @@ -346,7 +354,7 @@ func InitializeApp() (*App, error) { syncMap := informer.NewGlobalMapClusterNamespace() k8sInformerFactoryImpl := informer.NewK8sInformerFactoryImpl(sugaredLogger, syncMap, k8sServiceImpl) cronLoggerImpl := cron.NewCronLoggerImpl(sugaredLogger) - clusterReadServiceImpl := read.NewClusterReadServiceImpl(sugaredLogger, clusterRepositoryImpl) + clusterReadServiceImpl := read2.NewClusterReadServiceImpl(sugaredLogger, clusterRepositoryImpl) clusterServiceImpl, err := cluster.NewClusterServiceImpl(clusterRepositoryImpl, sugaredLogger, k8sServiceImpl, k8sInformerFactoryImpl, userAuthRepositoryImpl, userRepositoryImpl, roleGroupRepositoryImpl, environmentVariables, cronLoggerImpl, clusterReadServiceImpl) if err != nil { return nil, err @@ -359,7 +367,6 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - moduleRepositoryImpl := moduleRepo.NewModuleRepositoryImpl(db) argoApplicationConfigServiceImpl := config2.NewArgoApplicationConfigServiceImpl(sugaredLogger, k8sServiceImpl, clusterRepositoryImpl) k8sCommonServiceImpl := k8s2.NewK8sCommonServiceImpl(sugaredLogger, k8sServiceImpl, argoApplicationConfigServiceImpl, clusterReadServiceImpl) versionServiceImpl := version.NewVersionServiceImpl(sugaredLogger) @@ -390,12 +397,13 @@ func InitializeApp() (*App, error) { runnable := asyncProvider.NewAsyncRunnable(sugaredLogger) repositoryCredsK8sClientImpl := 
repoCredsK8sClient.NewRepositoryCredsK8sClientImpl(sugaredLogger, k8sServiceImpl) argoClientWrapperServiceEAImpl := argocdServer.NewArgoClientWrapperServiceEAImpl(sugaredLogger, repositoryCredsK8sClientImpl, argoCDConfigGetterImpl) - argoClientWrapperServiceImpl := argocdServer.NewArgoClientWrapperServiceImpl(serviceClientImpl, repositoryServiceClientImpl, clusterServiceClientImpl, serviceClientImpl2, certificateServiceClientImpl, sugaredLogger, acdConfig, gitOpsConfigReadServiceImpl, gitOperationServiceImpl, runnable, argoCDConfigGetterImpl, argoClientWrapperServiceEAImpl) + argoK8sClientImpl := argocdServer.NewArgoK8sClientImpl(sugaredLogger, k8sServiceImpl) + argoClientWrapperServiceImpl := argocdServer.NewArgoClientWrapperServiceImpl(serviceClientImpl, repositoryServiceClientImpl, clusterServiceClientImpl, serviceClientImpl2, certificateServiceClientImpl, sugaredLogger, acdConfig, gitOpsConfigReadServiceImpl, gitOperationServiceImpl, runnable, argoCDConfigGetterImpl, argoClientWrapperServiceEAImpl, argoK8sClientImpl) clusterServiceImplExtended := cluster.NewClusterServiceImplExtended(environmentRepositoryImpl, grafanaClientImpl, installedAppRepositoryImpl, gitOpsConfigReadServiceImpl, clusterServiceImpl, argoClientWrapperServiceImpl) loginService := middleware.NewUserLogin(sessionManager, k8sClient) userAuthServiceImpl := user.NewUserAuthServiceImpl(userAuthRepositoryImpl, sessionManager, loginService, sugaredLogger, userRepositoryImpl, roleGroupRepositoryImpl, userServiceImpl) environmentServiceImpl := environment.NewEnvironmentServiceImpl(environmentRepositoryImpl, clusterServiceImplExtended, sugaredLogger, k8sServiceImpl, k8sInformerFactoryImpl, userAuthServiceImpl, attributesRepositoryImpl, clusterReadServiceImpl) - environmentReadServiceImpl := read2.NewEnvironmentReadServiceImpl(sugaredLogger, environmentRepositoryImpl) + environmentReadServiceImpl := read3.NewEnvironmentReadServiceImpl(sugaredLogger, environmentRepositoryImpl) validate, err := util.IntValidator() if err != nil { return nil, err @@ -413,7 +421,7 @@ func InitializeApp() (*App, error) { return nil, err } teamRepositoryImpl := repository8.NewTeamRepositoryImpl(db) - teamReadServiceImpl := read3.NewTeamReadService(sugaredLogger, teamRepositoryImpl) + teamReadServiceImpl := read4.NewTeamReadService(sugaredLogger, teamRepositoryImpl) teamServiceImpl := team.NewTeamServiceImpl(sugaredLogger, teamRepositoryImpl, userAuthServiceImpl, teamReadServiceImpl) appRepositoryImpl := app.NewAppRepositoryImpl(db, sugaredLogger) pipelineRepositoryImpl := pipelineConfig.NewPipelineRepositoryImpl(db, sugaredLogger) @@ -433,7 +441,7 @@ func InitializeApp() (*App, error) { } helmAppClientImpl := gRPC.NewHelmAppClientImpl(sugaredLogger, helmClientConfig, configuration) pumpImpl := connector.NewPumpImpl(sugaredLogger) - installedAppReadServiceEAImpl := read4.NewInstalledAppReadServiceEAImpl(sugaredLogger, installedAppRepositoryImpl) + installedAppReadServiceEAImpl := read5.NewInstalledAppReadServiceEAImpl(sugaredLogger, installedAppRepositoryImpl) dbMigrationServiceImpl := dbMigration.NewDbMigrationServiceImpl(sugaredLogger, appRepositoryImpl, installedAppReadServiceEAImpl) enforcerUtilHelmImpl := rbac.NewEnforcerUtilHelmImpl(sugaredLogger, clusterRepositoryImpl, teamRepositoryImpl, appRepositoryImpl, installedAppRepositoryImpl, dbMigrationServiceImpl) serverDataStoreServerDataStore := serverDataStore.InitServerDataStore() @@ -442,7 +450,7 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - helmAppReadServiceImpl 
:= read5.NewHelmAppReadServiceImpl(sugaredLogger, clusterReadServiceImpl) + helmAppReadServiceImpl := read6.NewHelmAppReadServiceImpl(sugaredLogger, clusterReadServiceImpl) helmAppServiceImpl := service.NewHelmAppServiceImpl(sugaredLogger, clusterServiceImplExtended, helmAppClientImpl, pumpImpl, enforcerUtilHelmImpl, serverDataStoreServerDataStore, serverEnvConfigServerEnvConfig, appStoreApplicationVersionRepositoryImpl, environmentServiceImpl, pipelineRepositoryImpl, installedAppRepositoryImpl, appRepositoryImpl, clusterRepositoryImpl, k8sServiceImpl, helmReleaseConfig, helmAppReadServiceImpl) dockerArtifactStoreRepositoryImpl := repository9.NewDockerArtifactStoreRepositoryImpl(db) dockerRegistryIpsConfigRepositoryImpl := repository9.NewDockerRegistryIpsConfigRepositoryImpl(db) @@ -470,16 +478,17 @@ func InitializeApp() (*App, error) { chartRepositoryImpl := chartRepoRepository.NewChartRepository(db, transactionUtilImpl) envConfigOverrideRepositoryImpl := chartConfig.NewEnvConfigOverrideRepository(db) gitProviderRepositoryImpl := repository11.NewGitProviderRepositoryImpl(db) - gitProviderReadServiceImpl := read6.NewGitProviderReadService(sugaredLogger, gitProviderRepositoryImpl) - envConfigOverrideReadServiceImpl := read7.NewEnvConfigOverrideReadServiceImpl(envConfigOverrideRepositoryImpl, sugaredLogger) - commonServiceImpl := commonService.NewCommonServiceImpl(sugaredLogger, chartRepositoryImpl, envConfigOverrideRepositoryImpl, dockerArtifactStoreRepositoryImpl, attributesRepositoryImpl, environmentRepositoryImpl, appRepositoryImpl, gitOpsConfigReadServiceImpl, gitProviderReadServiceImpl, envConfigOverrideReadServiceImpl, teamReadServiceImpl) + gitProviderReadServiceImpl := read7.NewGitProviderReadService(sugaredLogger, gitProviderRepositoryImpl) + envConfigOverrideReadServiceImpl := read8.NewEnvConfigOverrideReadServiceImpl(envConfigOverrideRepositoryImpl, chartRepositoryImpl, sugaredLogger) + commonBaseServiceImpl := commonService.NewCommonBaseServiceImpl(sugaredLogger, environmentVariables, moduleReadServiceImpl) + commonServiceImpl := commonService.NewCommonServiceImpl(sugaredLogger, chartRepositoryImpl, envConfigOverrideRepositoryImpl, dockerArtifactStoreRepositoryImpl, attributesRepositoryImpl, environmentRepositoryImpl, appRepositoryImpl, gitOpsConfigReadServiceImpl, gitProviderReadServiceImpl, envConfigOverrideReadServiceImpl, commonBaseServiceImpl, teamReadServiceImpl) configMapRepositoryImpl := chartConfig.NewConfigMapRepositoryImpl(sugaredLogger, db) mergeUtil := util.MergeUtil{ Logger: sugaredLogger, } scopedVariableRepositoryImpl := repository12.NewScopedVariableRepository(db, sugaredLogger, transactionUtilImpl) devtronResourceSearchableKeyRepositoryImpl := repository13.NewDevtronResourceSearchableKeyRepositoryImpl(sugaredLogger, db) - devtronResourceSearchableKeyServiceImpl, err := read8.NewDevtronResourceSearchableKeyServiceImpl(sugaredLogger, devtronResourceSearchableKeyRepositoryImpl) + devtronResourceSearchableKeyServiceImpl, err := read9.NewDevtronResourceSearchableKeyServiceImpl(sugaredLogger, devtronResourceSearchableKeyRepositoryImpl) if err != nil { return nil, err } @@ -507,7 +516,7 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - configReadServiceImpl := read9.NewConfigReadServiceImpl(sugaredLogger, commonServiceImpl, configMapRepositoryImpl, mergeUtil, scopedVariableCMCSManagerImpl) + configReadServiceImpl := read10.NewConfigReadServiceImpl(sugaredLogger, commonServiceImpl, configMapRepositoryImpl, mergeUtil, 
scopedVariableCMCSManagerImpl) globalCMCSRepositoryImpl := repository2.NewGlobalCMCSRepositoryImpl(sugaredLogger, db) globalCMCSServiceImpl := pipeline.NewGlobalCMCSServiceImpl(sugaredLogger, globalCMCSRepositoryImpl) argoWorkflowExecutorImpl := executors.NewArgoWorkflowExecutorImpl(sugaredLogger) @@ -536,7 +545,7 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - moduleEnvConfig, err := module.ParseModuleEnvConfig() + moduleEnvConfig, err := bean2.ParseModuleEnvConfig() if err != nil { return nil, err } @@ -567,29 +576,32 @@ func InitializeApp() (*App, error) { pipelineStatusSyncDetailServiceImpl := status.NewPipelineStatusSyncDetailServiceImpl(sugaredLogger, pipelineStatusSyncDetailRepositoryImpl) installedAppVersionHistoryRepositoryImpl := repository3.NewInstalledAppVersionHistoryRepositoryImpl(sugaredLogger, db) repositoryImpl := deploymentConfig.NewRepositoryImpl(db) - deploymentConfigServiceImpl := common.NewDeploymentConfigServiceImpl(repositoryImpl, sugaredLogger, chartRepositoryImpl, pipelineRepositoryImpl, appRepositoryImpl, installedAppReadServiceEAImpl, environmentVariables) + chartRefRepositoryImpl := chartRepoRepository.NewChartRefRepositoryImpl(db) + deploymentConfigReadServiceImpl := read11.NewDeploymentConfigReadServiceImpl(sugaredLogger, repositoryImpl, environmentVariables, chartRepositoryImpl, pipelineRepositoryImpl, appRepositoryImpl, environmentRepositoryImpl, envConfigOverrideReadServiceImpl) + deploymentConfigServiceImpl := common.NewDeploymentConfigServiceImpl(repositoryImpl, sugaredLogger, chartRepositoryImpl, pipelineRepositoryImpl, appRepositoryImpl, installedAppReadServiceEAImpl, environmentVariables, envConfigOverrideReadServiceImpl, environmentRepositoryImpl, chartRefRepositoryImpl, deploymentConfigReadServiceImpl, acdAuthConfig) pipelineStatusTimelineServiceImpl := status.NewPipelineStatusTimelineServiceImpl(sugaredLogger, pipelineStatusTimelineRepositoryImpl, cdWorkflowRepositoryImpl, userServiceImpl, pipelineStatusTimelineResourcesServiceImpl, pipelineStatusSyncDetailServiceImpl, installedAppRepositoryImpl, installedAppVersionHistoryRepositoryImpl, deploymentConfigServiceImpl) appServiceConfig, err := app2.GetAppServiceConfig() if err != nil { return nil, err } appStatusServiceImpl := appStatus2.NewAppStatusServiceImpl(appStatusRepositoryImpl, sugaredLogger, enforcerImpl, enforcerUtilImpl) - installedAppReadServiceImpl := read4.NewInstalledAppReadServiceImpl(installedAppReadServiceEAImpl) - chartRefRepositoryImpl := chartRepoRepository.NewChartRefRepositoryImpl(db) - chartRefServiceImpl := chartRef.NewChartRefServiceImpl(sugaredLogger, chartRefRepositoryImpl, chartTemplateServiceImpl, chartRepositoryImpl, mergeUtil) - deploymentTemplateServiceImpl := deploymentTemplate.NewDeploymentTemplateServiceImpl(sugaredLogger, chartRefServiceImpl, chartTemplateServiceImpl, chartRepositoryImpl) + installedAppReadServiceImpl := read5.NewInstalledAppReadServiceImpl(installedAppReadServiceEAImpl) + chartRefReadServiceImpl := read12.NewChartRefReadServiceImpl(sugaredLogger, chartRefRepositoryImpl) + chartRefServiceImpl := chartRef.NewChartRefServiceImpl(sugaredLogger, chartRefRepositoryImpl, chartRefReadServiceImpl, chartTemplateServiceImpl, chartRepositoryImpl, mergeUtil) + deploymentTemplateServiceImpl := deploymentTemplate.NewDeploymentTemplateServiceImpl(sugaredLogger, chartRefServiceImpl, chartTemplateServiceImpl, chartRepositoryImpl, deploymentConfigServiceImpl) appListingRepositoryQueryBuilder := 
helper.NewAppListingRepositoryQueryBuilder(sugaredLogger) + appListingRepositoryImpl := repository2.NewAppListingRepositoryImpl(sugaredLogger, db, appListingRepositoryQueryBuilder, environmentRepositoryImpl) appWorkflowRepositoryImpl := appWorkflow.NewAppWorkflowRepositoryImpl(sugaredLogger, db) - appListingRepositoryImpl := repository2.NewAppListingRepositoryImpl(sugaredLogger, db, appListingRepositoryQueryBuilder, environmentRepositoryImpl, gitOpsConfigRepositoryImpl, appWorkflowRepositoryImpl, repositoryImpl) + appDetailsReadServiceImpl := read13.NewAppDetailsReadServiceImpl(db, sugaredLogger, gitOpsConfigReadServiceImpl, deploymentConfigReadServiceImpl, appWorkflowRepositoryImpl, appListingRepositoryImpl) appListingViewBuilderImpl := app2.NewAppListingViewBuilderImpl(sugaredLogger) linkoutsRepositoryImpl := repository2.NewLinkoutsRepositoryImpl(sugaredLogger, db) ciTemplateOverrideRepositoryImpl := pipelineConfig.NewCiTemplateOverrideRepositoryImpl(db, sugaredLogger) - ciPipelineConfigReadServiceImpl := read10.NewCiPipelineConfigReadServiceImpl(sugaredLogger, ciPipelineRepositoryImpl, ciTemplateOverrideRepositoryImpl) + ciPipelineConfigReadServiceImpl := read14.NewCiPipelineConfigReadServiceImpl(sugaredLogger, ciPipelineRepositoryImpl, ciTemplateOverrideRepositoryImpl) dockerRegistryIpsConfigServiceImpl := dockerRegistry.NewDockerRegistryIpsConfigServiceImpl(sugaredLogger, dockerRegistryIpsConfigRepositoryImpl, k8sServiceImpl, dockerArtifactStoreRepositoryImpl, clusterReadServiceImpl, ciPipelineConfigReadServiceImpl) appLevelMetricsRepositoryImpl := repository16.NewAppLevelMetricsRepositoryImpl(db, sugaredLogger) envLevelAppMetricsRepositoryImpl := repository16.NewEnvLevelAppMetricsRepositoryImpl(db, sugaredLogger) deployedAppMetricsServiceImpl := deployedAppMetrics.NewDeployedAppMetricsServiceImpl(sugaredLogger, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, chartRefServiceImpl) - appListingServiceImpl := app2.NewAppListingServiceImpl(sugaredLogger, appListingRepositoryImpl, appRepositoryImpl, appListingViewBuilderImpl, pipelineRepositoryImpl, linkoutsRepositoryImpl, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, environmentRepositoryImpl, chartRepositoryImpl, ciPipelineRepositoryImpl, dockerRegistryIpsConfigServiceImpl, userRepositoryImpl, deployedAppMetricsServiceImpl, ciArtifactRepositoryImpl, envConfigOverrideReadServiceImpl, ciPipelineConfigReadServiceImpl) + appListingServiceImpl := app2.NewAppListingServiceImpl(sugaredLogger, appListingRepositoryImpl, appDetailsReadServiceImpl, appRepositoryImpl, appListingViewBuilderImpl, pipelineRepositoryImpl, linkoutsRepositoryImpl, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, environmentRepositoryImpl, chartRepositoryImpl, ciPipelineRepositoryImpl, dockerRegistryIpsConfigServiceImpl, userRepositoryImpl, deployedAppMetricsServiceImpl, ciArtifactRepositoryImpl, envConfigOverrideReadServiceImpl, ciPipelineConfigReadServiceImpl) workflowStageRepositoryImpl := repository17.NewWorkflowStageRepositoryImpl(sugaredLogger, db) workFlowStageStatusServiceImpl := workflowStatus.NewWorkflowStageFlowStatusServiceImpl(sugaredLogger, workflowStageRepositoryImpl, ciWorkflowRepositoryImpl, cdWorkflowRepositoryImpl, transactionUtilImpl) cdWorkflowRunnerServiceImpl := cd.NewCdWorkflowRunnerServiceImpl(sugaredLogger, cdWorkflowRepositoryImpl, workFlowStageStatusServiceImpl, transactionUtilImpl) @@ -622,7 +634,7 @@ func InitializeApp() (*App, error) { return nil, err } materialRepositoryImpl := 
repository20.NewMaterialRepositoryImpl(db) - gitMaterialReadServiceImpl := read11.NewGitMaterialReadServiceImpl(sugaredLogger, materialRepositoryImpl) + gitMaterialReadServiceImpl := read15.NewGitMaterialReadServiceImpl(sugaredLogger, materialRepositoryImpl) appCrudOperationServiceImpl := app2.NewAppCrudOperationServiceImpl(appLabelRepositoryImpl, sugaredLogger, appRepositoryImpl, userRepositoryImpl, installedAppRepositoryImpl, genericNoteServiceImpl, installedAppDBServiceImpl, crudOperationServiceConfig, dbMigrationServiceImpl, gitMaterialReadServiceImpl) imageTagRepositoryImpl := repository2.NewImageTagRepository(db, sugaredLogger) customTagServiceImpl := pipeline.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) @@ -647,11 +659,12 @@ func InitializeApp() (*App, error) { ciBuildConfigServiceImpl := pipeline.NewCiBuildConfigServiceImpl(sugaredLogger, ciBuildConfigRepositoryImpl) ciTemplateServiceImpl := pipeline.NewCiTemplateServiceImpl(sugaredLogger, ciBuildConfigServiceImpl, ciTemplateRepositoryImpl, ciTemplateOverrideRepositoryImpl) pipelineConfigRepositoryImpl := chartConfig.NewPipelineConfigRepository(db) - configMapServiceImpl := pipeline.NewConfigMapServiceImpl(chartRepositoryImpl, sugaredLogger, chartRepoRepositoryImpl, mergeUtil, pipelineConfigRepositoryImpl, configMapRepositoryImpl, envConfigOverrideRepositoryImpl, commonServiceImpl, appRepositoryImpl, configMapHistoryServiceImpl, environmentRepositoryImpl, scopedVariableCMCSManagerImpl) + configMapServiceImpl := pipeline.NewConfigMapServiceImpl(chartRepositoryImpl, sugaredLogger, chartRepoRepositoryImpl, mergeUtil, pipelineConfigRepositoryImpl, configMapRepositoryImpl, commonServiceImpl, appRepositoryImpl, configMapHistoryServiceImpl, environmentRepositoryImpl, scopedVariableCMCSManagerImpl) deploymentTemplateHistoryRepositoryImpl := repository21.NewDeploymentTemplateHistoryRepositoryImpl(sugaredLogger, db) deploymentTemplateHistoryServiceImpl := deploymentTemplate.NewDeploymentTemplateHistoryServiceImpl(sugaredLogger, deploymentTemplateHistoryRepositoryImpl, pipelineRepositoryImpl, chartRepositoryImpl, userServiceImpl, cdWorkflowRepositoryImpl, scopedVariableManagerImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl) - chartServiceImpl := chart.NewChartServiceImpl(chartRepositoryImpl, sugaredLogger, chartTemplateServiceImpl, chartRepoRepositoryImpl, appRepositoryImpl, mergeUtil, envConfigOverrideRepositoryImpl, pipelineConfigRepositoryImpl, environmentRepositoryImpl, deploymentTemplateHistoryServiceImpl, scopedVariableManagerImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl, gitOpsConfigReadServiceImpl, deploymentConfigServiceImpl, envConfigOverrideReadServiceImpl) - ciCdPipelineOrchestratorImpl := pipeline.NewCiCdPipelineOrchestrator(appRepositoryImpl, sugaredLogger, materialRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl, cdWorkflowRepositoryImpl, clientImpl, ciCdConfig, appWorkflowRepositoryImpl, environmentRepositoryImpl, attributesServiceImpl, appCrudOperationServiceImpl, userAuthServiceImpl, prePostCdScriptHistoryServiceImpl, pipelineStageServiceImpl, gitMaterialHistoryServiceImpl, ciPipelineHistoryServiceImpl, ciTemplateReadServiceImpl, ciTemplateServiceImpl, dockerArtifactStoreRepositoryImpl, ciArtifactRepositoryImpl, configMapServiceImpl, customTagServiceImpl, genericNoteServiceImpl, chartServiceImpl, transactionUtilImpl, gitOpsConfigReadServiceImpl, deploymentConfigServiceImpl) + chartReadServiceImpl := 
read16.NewChartReadServiceImpl(sugaredLogger, chartRepositoryImpl, deploymentConfigServiceImpl, deployedAppMetricsServiceImpl, gitOpsConfigReadServiceImpl) + chartServiceImpl := chart.NewChartServiceImpl(chartRepositoryImpl, sugaredLogger, chartTemplateServiceImpl, chartRepoRepositoryImpl, appRepositoryImpl, mergeUtil, envConfigOverrideRepositoryImpl, pipelineConfigRepositoryImpl, environmentRepositoryImpl, deploymentTemplateHistoryServiceImpl, scopedVariableManagerImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl, gitOpsConfigReadServiceImpl, deploymentConfigServiceImpl, envConfigOverrideReadServiceImpl, chartReadServiceImpl) + ciCdPipelineOrchestratorImpl := pipeline.NewCiCdPipelineOrchestrator(appRepositoryImpl, sugaredLogger, materialRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl, cdWorkflowRepositoryImpl, clientImpl, ciCdConfig, appWorkflowRepositoryImpl, environmentRepositoryImpl, attributesServiceImpl, appCrudOperationServiceImpl, userAuthServiceImpl, prePostCdScriptHistoryServiceImpl, pipelineStageServiceImpl, gitMaterialHistoryServiceImpl, ciPipelineHistoryServiceImpl, ciTemplateReadServiceImpl, ciTemplateServiceImpl, dockerArtifactStoreRepositoryImpl, ciArtifactRepositoryImpl, configMapServiceImpl, customTagServiceImpl, genericNoteServiceImpl, chartServiceImpl, transactionUtilImpl, gitOpsConfigReadServiceImpl, deploymentConfigServiceImpl, deploymentConfigReadServiceImpl, chartReadServiceImpl) ciServiceImpl := pipeline.NewCiServiceImpl(sugaredLogger, workflowServiceImpl, ciPipelineMaterialRepositoryImpl, workFlowStageStatusServiceImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciPipelineRepositoryImpl, ciArtifactRepositoryImpl, pipelineStageServiceImpl, userServiceImpl, ciTemplateReadServiceImpl, appCrudOperationServiceImpl, environmentRepositoryImpl, appRepositoryImpl, scopedVariableManagerImpl, customTagServiceImpl, pluginInputVariableParserImpl, globalPluginServiceImpl, infraProviderImpl, ciCdPipelineOrchestratorImpl, attributesServiceImpl, ciWorkflowRepositoryImpl, transactionUtilImpl) ciLogServiceImpl, err := pipeline.NewCiLogServiceImpl(sugaredLogger, ciServiceImpl, k8sServiceImpl) if err != nil { @@ -661,7 +674,7 @@ func InitializeApp() (*App, error) { resourceGroupMappingRepositoryImpl := resourceGroup.NewResourceGroupMappingRepositoryImpl(db) resourceGroupServiceImpl := resourceGroup2.NewResourceGroupServiceImpl(sugaredLogger, resourceGroupRepositoryImpl, resourceGroupMappingRepositoryImpl, enforcerUtilImpl, devtronResourceSearchableKeyServiceImpl, appStatusRepositoryImpl) imageTaggingRepositoryImpl := repository22.NewImageTaggingRepositoryImpl(db, transactionUtilImpl) - imageTaggingReadServiceImpl, err := read12.NewImageTaggingReadServiceImpl(imageTaggingRepositoryImpl, sugaredLogger) + imageTaggingReadServiceImpl, err := read17.NewImageTaggingReadServiceImpl(imageTaggingRepositoryImpl, sugaredLogger) if err != nil { return nil, err } @@ -683,11 +696,13 @@ func InitializeApp() (*App, error) { deploymentGroupRepositoryImpl := repository2.NewDeploymentGroupRepositoryImpl(sugaredLogger, db) pipelineStrategyHistoryRepositoryImpl := repository21.NewPipelineStrategyHistoryRepositoryImpl(sugaredLogger, db) pipelineStrategyHistoryServiceImpl := history.NewPipelineStrategyHistoryServiceImpl(sugaredLogger, pipelineStrategyHistoryRepositoryImpl, userServiceImpl) - propertiesConfigServiceImpl := pipeline.NewPropertiesConfigServiceImpl(sugaredLogger, envConfigOverrideRepositoryImpl, chartRepositoryImpl, 
environmentRepositoryImpl, deploymentTemplateHistoryServiceImpl, scopedVariableManagerImpl, deployedAppMetricsServiceImpl, envConfigOverrideReadServiceImpl) + propertiesConfigServiceImpl := pipeline.NewPropertiesConfigServiceImpl(sugaredLogger, envConfigOverrideRepositoryImpl, chartRepositoryImpl, environmentRepositoryImpl, deploymentTemplateHistoryServiceImpl, scopedVariableManagerImpl, deployedAppMetricsServiceImpl, envConfigOverrideReadServiceImpl, deploymentConfigServiceImpl) + installedAppDBExtendedServiceImpl := FullMode.NewInstalledAppDBExtendedServiceImpl(installedAppDBServiceImpl, appStatusServiceImpl, gitOpsConfigReadServiceImpl) + gitOpsValidationServiceImpl := validation.NewGitOpsValidationServiceImpl(sugaredLogger, gitFactory, gitOperationServiceImpl, gitOpsConfigReadServiceImpl, chartTemplateServiceImpl, chartServiceImpl, installedAppDBExtendedServiceImpl) imageDigestPolicyServiceImpl := imageDigestPolicy.NewImageDigestPolicyServiceImpl(sugaredLogger, qualifierMappingServiceImpl, devtronResourceSearchableKeyServiceImpl) pipelineConfigEventPublishServiceImpl := out.NewPipelineConfigEventPublishServiceImpl(sugaredLogger, pubSubClientServiceImpl) deploymentTypeOverrideServiceImpl := providerConfig.NewDeploymentTypeOverrideServiceImpl(sugaredLogger, environmentVariables, attributesServiceImpl) - cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, propertiesConfigServiceImpl, deploymentTemplateHistoryServiceImpl, scopedVariableManagerImpl, environmentVariables, customTagServiceImpl, ciPipelineConfigServiceImpl, buildPipelineSwitchServiceImpl, argoClientWrapperServiceImpl, deployedAppMetricsServiceImpl, gitOpsConfigReadServiceImpl, gitOperationServiceImpl, chartServiceImpl, imageDigestPolicyServiceImpl, pipelineConfigEventPublishServiceImpl, deploymentTypeOverrideServiceImpl, deploymentConfigServiceImpl) + cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, propertiesConfigServiceImpl, deploymentTemplateHistoryServiceImpl, scopedVariableManagerImpl, environmentVariables, customTagServiceImpl, ciPipelineConfigServiceImpl, buildPipelineSwitchServiceImpl, argoClientWrapperServiceImpl, deployedAppMetricsServiceImpl, gitOpsConfigReadServiceImpl, gitOpsValidationServiceImpl, gitOperationServiceImpl, chartServiceImpl, imageDigestPolicyServiceImpl, pipelineConfigEventPublishServiceImpl, deploymentTypeOverrideServiceImpl, deploymentConfigServiceImpl, envConfigOverrideReadServiceImpl, chartRefReadServiceImpl, chartTemplateServiceImpl, gitFactory, clusterReadServiceImpl, installedAppReadServiceImpl, chartReadServiceImpl) 
appArtifactManagerImpl := pipeline.NewAppArtifactManagerImpl(sugaredLogger, cdWorkflowRepositoryImpl, userServiceImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl, ciWorkflowRepositoryImpl, pipelineStageServiceImpl, cdPipelineConfigServiceImpl, dockerArtifactStoreRepositoryImpl, ciPipelineRepositoryImpl, ciTemplateReadServiceImpl) devtronAppCMCSServiceImpl := pipeline.NewDevtronAppCMCSServiceImpl(sugaredLogger, appServiceImpl, attributesRepositoryImpl) globalStrategyMetadataChartRefMappingRepositoryImpl := chartRepoRepository.NewGlobalStrategyMetadataChartRefMappingRepositoryImpl(db, sugaredLogger) @@ -705,19 +720,17 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - appDeploymentTypeChangeManagerImpl := pipeline.NewAppDeploymentTypeChangeManagerImpl(sugaredLogger, pipelineRepositoryImpl, appServiceImpl, appStatusRepositoryImpl, helmAppServiceImpl, appArtifactManagerImpl, cdPipelineConfigServiceImpl, gitOpsConfigReadServiceImpl, chartServiceImpl, workflowEventPublishServiceImpl, deploymentConfigServiceImpl) + appDeploymentTypeChangeManagerImpl := pipeline.NewAppDeploymentTypeChangeManagerImpl(sugaredLogger, pipelineRepositoryImpl, appServiceImpl, appStatusRepositoryImpl, helmAppServiceImpl, appArtifactManagerImpl, cdPipelineConfigServiceImpl, gitOpsConfigReadServiceImpl, chartServiceImpl, workflowEventPublishServiceImpl, deploymentConfigServiceImpl, chartReadServiceImpl) devtronAppConfigServiceImpl := pipeline.NewDevtronAppConfigServiceImpl(sugaredLogger, ciCdPipelineOrchestratorImpl, appRepositoryImpl, pipelineRepositoryImpl, resourceGroupServiceImpl, enforcerUtilImpl, ciMaterialConfigServiceImpl) pipelineBuilderImpl := pipeline.NewPipelineBuilderImpl(sugaredLogger, gitMaterialReadServiceImpl, chartRepositoryImpl, ciPipelineConfigServiceImpl, ciMaterialConfigServiceImpl, appArtifactManagerImpl, devtronAppCMCSServiceImpl, devtronAppStrategyServiceImpl, appDeploymentTypeChangeManagerImpl, cdPipelineConfigServiceImpl, devtronAppConfigServiceImpl) deploymentTemplateValidationServiceImpl := deploymentTemplate.NewDeploymentTemplateValidationServiceImpl(sugaredLogger, chartRefServiceImpl, scopedVariableManagerImpl) - installedAppDBExtendedServiceImpl := FullMode.NewInstalledAppDBExtendedServiceImpl(installedAppDBServiceImpl, appStatusServiceImpl, gitOpsConfigReadServiceImpl) - gitOpsValidationServiceImpl := validation.NewGitOpsValidationServiceImpl(sugaredLogger, gitFactory, gitOperationServiceImpl, gitOpsConfigReadServiceImpl, chartTemplateServiceImpl, chartServiceImpl, installedAppDBExtendedServiceImpl) - devtronAppGitOpConfigServiceImpl := gitOpsConfig.NewDevtronAppGitOpConfigServiceImpl(sugaredLogger, chartRepositoryImpl, chartServiceImpl, gitOpsConfigReadServiceImpl, gitOpsValidationServiceImpl, argoClientWrapperServiceImpl, deploymentConfigServiceImpl) + devtronAppGitOpConfigServiceImpl := gitOpsConfig.NewDevtronAppGitOpConfigServiceImpl(sugaredLogger, chartRepositoryImpl, chartServiceImpl, gitOpsConfigReadServiceImpl, gitOpsValidationServiceImpl, argoClientWrapperServiceImpl, deploymentConfigServiceImpl, chartReadServiceImpl) cdHandlerImpl := pipeline.NewCdHandlerImpl(sugaredLogger, userServiceImpl, cdWorkflowRepositoryImpl, ciLogServiceImpl, ciArtifactRepositoryImpl, ciPipelineMaterialRepositoryImpl, pipelineRepositoryImpl, environmentRepositoryImpl, ciWorkflowRepositoryImpl, enforcerUtilImpl, resourceGroupServiceImpl, imageTaggingServiceImpl, k8sServiceImpl, workflowServiceImpl, clusterServiceImplExtended, blobStorageConfigServiceImpl, 
customTagServiceImpl, deploymentConfigServiceImpl, workFlowStageStatusServiceImpl, cdWorkflowRunnerServiceImpl) appWorkflowServiceImpl := appWorkflow2.NewAppWorkflowServiceImpl(sugaredLogger, appWorkflowRepositoryImpl, ciCdPipelineOrchestratorImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, enforcerUtilImpl, resourceGroupServiceImpl, appRepositoryImpl, userAuthServiceImpl, chartServiceImpl, deploymentConfigServiceImpl) - appCloneServiceImpl := appClone.NewAppCloneServiceImpl(sugaredLogger, pipelineBuilderImpl, attributesServiceImpl, chartServiceImpl, configMapServiceImpl, appWorkflowServiceImpl, appListingServiceImpl, propertiesConfigServiceImpl, pipelineStageServiceImpl, ciTemplateReadServiceImpl, appRepositoryImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, ciPipelineConfigServiceImpl, gitOpsConfigReadServiceImpl) + appCloneServiceImpl := appClone.NewAppCloneServiceImpl(sugaredLogger, pipelineBuilderImpl, attributesServiceImpl, chartServiceImpl, configMapServiceImpl, appWorkflowServiceImpl, appListingServiceImpl, propertiesConfigServiceImpl, pipelineStageServiceImpl, ciTemplateReadServiceImpl, appRepositoryImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, ciPipelineConfigServiceImpl, gitOpsConfigReadServiceImpl, chartReadServiceImpl) deploymentTemplateRepositoryImpl := repository2.NewDeploymentTemplateRepositoryImpl(db, sugaredLogger) - deploymentTemplateHistoryReadServiceImpl := read7.NewDeploymentTemplateHistoryReadServiceImpl(sugaredLogger, deploymentTemplateHistoryRepositoryImpl, scopedVariableManagerImpl) - generateManifestDeploymentTemplateServiceImpl, err := generateManifest.NewDeploymentTemplateServiceImpl(sugaredLogger, chartServiceImpl, appListingServiceImpl, deploymentTemplateRepositoryImpl, helmAppReadServiceImpl, chartTemplateServiceImpl, helmAppClientImpl, k8sServiceImpl, propertiesConfigServiceImpl, environmentRepositoryImpl, appRepositoryImpl, scopedVariableManagerImpl, chartRefServiceImpl, pipelineOverrideRepositoryImpl, chartRepositoryImpl, pipelineRepositoryImpl, utilMergeUtil, deploymentTemplateHistoryReadServiceImpl) + deploymentTemplateHistoryReadServiceImpl := read8.NewDeploymentTemplateHistoryReadServiceImpl(sugaredLogger, deploymentTemplateHistoryRepositoryImpl, scopedVariableManagerImpl) + generateManifestDeploymentTemplateServiceImpl, err := generateManifest.NewDeploymentTemplateServiceImpl(sugaredLogger, chartServiceImpl, appListingServiceImpl, deploymentTemplateRepositoryImpl, helmAppReadServiceImpl, chartTemplateServiceImpl, helmAppClientImpl, k8sServiceImpl, propertiesConfigServiceImpl, environmentRepositoryImpl, appRepositoryImpl, scopedVariableManagerImpl, chartRefServiceImpl, pipelineOverrideRepositoryImpl, chartRepositoryImpl, pipelineRepositoryImpl, utilMergeUtil, deploymentTemplateHistoryReadServiceImpl, deploymentConfigReadServiceImpl) if err != nil { return nil, err } @@ -726,23 +739,22 @@ func InitializeApp() (*App, error) { imageScanDeployInfoRepositoryImpl := repository24.NewImageScanDeployInfoRepositoryImpl(db, sugaredLogger) imageScanObjectMetaRepositoryImpl := repository24.NewImageScanObjectMetaRepositoryImpl(db, sugaredLogger) imageScanHistoryRepositoryImpl := repository24.NewImageScanHistoryRepositoryImpl(db, sugaredLogger) - imageScanHistoryReadServiceImpl := read13.NewImageScanHistoryReadService(sugaredLogger, imageScanHistoryRepositoryImpl) + imageScanHistoryReadServiceImpl := read18.NewImageScanHistoryReadService(sugaredLogger, imageScanHistoryRepositoryImpl) cveStoreRepositoryImpl := 
repository24.NewCveStoreRepositoryImpl(db, sugaredLogger) policyServiceImpl := imageScanning.NewPolicyServiceImpl(environmentServiceImpl, sugaredLogger, appRepositoryImpl, pipelineOverrideRepositoryImpl, cvePolicyRepositoryImpl, clusterServiceImplExtended, pipelineRepositoryImpl, imageScanResultRepositoryImpl, imageScanDeployInfoRepositoryImpl, imageScanObjectMetaRepositoryImpl, httpClient, ciArtifactRepositoryImpl, ciCdConfig, imageScanHistoryReadServiceImpl, cveStoreRepositoryImpl, ciTemplateRepositoryImpl, clusterReadServiceImpl, transactionUtilImpl) - imageScanResultReadServiceImpl := read13.NewImageScanResultReadServiceImpl(sugaredLogger, imageScanResultRepositoryImpl) - pipelineConfigRestHandlerImpl := configure.NewPipelineRestHandlerImpl(pipelineBuilderImpl, sugaredLogger, deploymentTemplateValidationServiceImpl, chartServiceImpl, devtronAppGitOpConfigServiceImpl, propertiesConfigServiceImpl, userServiceImpl, teamServiceImpl, enforcerImpl, ciHandlerImpl, validate, clientImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, enforcerUtilImpl, dockerRegistryConfigImpl, cdHandlerImpl, appCloneServiceImpl, generateManifestDeploymentTemplateServiceImpl, appWorkflowServiceImpl, gitMaterialReadServiceImpl, policyServiceImpl, imageScanResultReadServiceImpl, ciPipelineMaterialRepositoryImpl, imageTaggingReadServiceImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl, ciCdPipelineOrchestratorImpl, gitProviderReadServiceImpl, teamReadServiceImpl) + imageScanResultReadServiceImpl := read18.NewImageScanResultReadServiceImpl(sugaredLogger, imageScanResultRepositoryImpl) + pipelineConfigRestHandlerImpl := configure.NewPipelineRestHandlerImpl(pipelineBuilderImpl, sugaredLogger, deploymentTemplateValidationServiceImpl, chartServiceImpl, devtronAppGitOpConfigServiceImpl, propertiesConfigServiceImpl, userServiceImpl, teamServiceImpl, enforcerImpl, ciHandlerImpl, validate, clientImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, enforcerUtilImpl, dockerRegistryConfigImpl, cdHandlerImpl, appCloneServiceImpl, generateManifestDeploymentTemplateServiceImpl, appWorkflowServiceImpl, gitMaterialReadServiceImpl, policyServiceImpl, imageScanResultReadServiceImpl, ciPipelineMaterialRepositoryImpl, imageTaggingReadServiceImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl, ciCdPipelineOrchestratorImpl, gitProviderReadServiceImpl, teamReadServiceImpl, environmentRepositoryImpl, chartReadServiceImpl) gitOpsManifestPushServiceImpl := publish.NewGitOpsManifestPushServiceImpl(sugaredLogger, pipelineStatusTimelineServiceImpl, pipelineOverrideRepositoryImpl, acdConfig, chartRefServiceImpl, gitOpsConfigReadServiceImpl, chartServiceImpl, gitOperationServiceImpl, argoClientWrapperServiceImpl, transactionUtilImpl, deploymentConfigServiceImpl, chartTemplateServiceImpl) - argoK8sClientImpl := argocdServer.NewArgoK8sClientImpl(sugaredLogger, k8sServiceImpl) manifestCreationServiceImpl := manifest.NewManifestCreationServiceImpl(sugaredLogger, dockerRegistryIpsConfigServiceImpl, chartRefServiceImpl, scopedVariableCMCSManagerImpl, k8sCommonServiceImpl, deployedAppMetricsServiceImpl, imageDigestPolicyServiceImpl, utilMergeUtil, appCrudOperationServiceImpl, deploymentTemplateServiceImpl, argoClientWrapperServiceImpl, configMapHistoryRepositoryImpl, configMapRepositoryImpl, chartRepositoryImpl, envConfigOverrideRepositoryImpl, environmentRepositoryImpl, pipelineRepositoryImpl, ciArtifactRepositoryImpl, 
pipelineOverrideRepositoryImpl, pipelineStrategyHistoryRepositoryImpl, pipelineConfigRepositoryImpl, deploymentTemplateHistoryRepositoryImpl, deploymentConfigServiceImpl, envConfigOverrideReadServiceImpl) - configMapHistoryReadServiceImpl := read14.NewConfigMapHistoryReadService(sugaredLogger, configMapHistoryRepositoryImpl, scopedVariableCMCSManagerImpl) + configMapHistoryReadServiceImpl := read19.NewConfigMapHistoryReadService(sugaredLogger, configMapHistoryRepositoryImpl, scopedVariableCMCSManagerImpl) deployedConfigurationHistoryServiceImpl := history.NewDeployedConfigurationHistoryServiceImpl(sugaredLogger, userServiceImpl, deploymentTemplateHistoryServiceImpl, pipelineStrategyHistoryServiceImpl, configMapHistoryServiceImpl, cdWorkflowRepositoryImpl, scopedVariableCMCSManagerImpl, deploymentTemplateHistoryReadServiceImpl, configMapHistoryReadServiceImpl) userDeploymentRequestRepositoryImpl := repository25.NewUserDeploymentRequestRepositoryImpl(db, transactionUtilImpl) userDeploymentRequestServiceImpl := service3.NewUserDeploymentRequestServiceImpl(sugaredLogger, userDeploymentRequestRepositoryImpl) - imageScanDeployInfoReadServiceImpl := read13.NewImageScanDeployInfoReadService(sugaredLogger, imageScanDeployInfoRepositoryImpl) + imageScanDeployInfoReadServiceImpl := read18.NewImageScanDeployInfoReadService(sugaredLogger, imageScanDeployInfoRepositoryImpl) imageScanDeployInfoServiceImpl := imageScanning.NewImageScanDeployInfoService(sugaredLogger, imageScanDeployInfoRepositoryImpl) manifestPushConfigRepositoryImpl := repository18.NewManifestPushConfigRepository(sugaredLogger, db) scanToolExecutionHistoryMappingRepositoryImpl := repository24.NewScanToolExecutionHistoryMappingRepositoryImpl(db, sugaredLogger) - cdWorkflowReadServiceImpl := read15.NewCdWorkflowReadServiceImpl(sugaredLogger, cdWorkflowRepositoryImpl) + cdWorkflowReadServiceImpl := read20.NewCdWorkflowReadServiceImpl(sugaredLogger, cdWorkflowRepositoryImpl) imageScanServiceImpl := imageScanning.NewImageScanServiceImpl(sugaredLogger, imageScanHistoryRepositoryImpl, imageScanResultRepositoryImpl, imageScanObjectMetaRepositoryImpl, cveStoreRepositoryImpl, imageScanDeployInfoRepositoryImpl, userServiceImpl, appRepositoryImpl, environmentServiceImpl, ciArtifactRepositoryImpl, policyServiceImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, scanToolMetadataRepositoryImpl, scanToolExecutionHistoryMappingRepositoryImpl, cvePolicyRepositoryImpl, cdWorkflowReadServiceImpl) triggerServiceImpl, err := devtronApps.NewTriggerServiceImpl(sugaredLogger, cdWorkflowCommonServiceImpl, gitOpsManifestPushServiceImpl, gitOpsConfigReadServiceImpl, argoK8sClientImpl, acdConfig, argoClientWrapperServiceImpl, pipelineStatusTimelineServiceImpl, chartTemplateServiceImpl, workflowEventPublishServiceImpl, manifestCreationServiceImpl, deployedConfigurationHistoryServiceImpl, pipelineStageServiceImpl, globalPluginServiceImpl, customTagServiceImpl, pluginInputVariableParserImpl, prePostCdScriptHistoryServiceImpl, scopedVariableCMCSManagerImpl, workflowServiceImpl, imageDigestPolicyServiceImpl, userServiceImpl, clientImpl, helmAppServiceImpl, enforcerUtilImpl, userDeploymentRequestServiceImpl, helmAppClientImpl, eventSimpleFactoryImpl, eventRESTClientImpl, environmentVariables, appRepositoryImpl, ciPipelineMaterialRepositoryImpl, imageScanHistoryReadServiceImpl, imageScanDeployInfoReadServiceImpl, imageScanDeployInfoServiceImpl, pipelineRepositoryImpl, pipelineOverrideRepositoryImpl, manifestPushConfigRepositoryImpl, chartRepositoryImpl, 
@@ -767,7 +779,7 @@ func InitializeApp() (*App, error) {
	gitProviderRouterImpl := router.NewGitProviderRouterImpl(gitProviderRestHandlerImpl)
	gitHostRepositoryImpl := repository26.NewGitHostRepositoryImpl(db)
	gitHostConfigImpl := gitHost.NewGitHostConfigImpl(gitHostRepositoryImpl, sugaredLogger)
-	gitHostReadServiceImpl := read16.NewGitHostReadServiceImpl(sugaredLogger, gitHostRepositoryImpl, attributesServiceImpl)
+	gitHostReadServiceImpl := read21.NewGitHostReadServiceImpl(sugaredLogger, gitHostRepositoryImpl, attributesServiceImpl)
	gitHostRestHandlerImpl := restHandler.NewGitHostRestHandlerImpl(sugaredLogger, gitHostConfigImpl, userServiceImpl, validate, enforcerImpl, clientImpl, gitProviderReadServiceImpl, gitHostReadServiceImpl)
	gitHostRouterImpl := router.NewGitHostRouterImpl(gitHostRestHandlerImpl)
	chartProviderServiceImpl := chartProvider.NewChartProviderServiceImpl(sugaredLogger, chartRepoRepositoryImpl, chartRepositoryServiceImpl, dockerArtifactStoreRepositoryImpl, ociRegistryConfigRepositoryImpl)
@@ -805,7 +817,8 @@ func InitializeApp() (*App, error) {
	if err != nil {
		return nil, err
	}
-	argoApplicationServiceExtendedImpl := argoApplication.NewArgoApplicationServiceExtendedServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, helmAppClientImpl, helmAppServiceImpl, k8sApplicationServiceImpl, argoApplicationConfigServiceImpl, argoClientWrapperServiceImpl)
+	argoApplicationServiceImpl := argoApplication.NewArgoApplicationServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, helmAppClientImpl, helmAppServiceImpl, k8sApplicationServiceImpl, argoApplicationConfigServiceImpl, deploymentConfigServiceImpl)
+	argoApplicationServiceExtendedImpl := argoApplication.NewArgoApplicationServiceExtendedServiceImpl(argoApplicationServiceImpl, argoClientWrapperServiceImpl)
	installedAppResourceServiceImpl := resource.NewInstalledAppResourceServiceImpl(sugaredLogger, installedAppRepositoryImpl, appStoreApplicationVersionRepositoryImpl, argoClientWrapperServiceImpl, acdAuthConfig, installedAppVersionHistoryRepositoryImpl, helmAppServiceImpl, helmAppReadServiceImpl, appStatusServiceImpl, k8sCommonServiceImpl, k8sApplicationServiceImpl, k8sServiceImpl, deploymentConfigServiceImpl, ociRegistryConfigRepositoryImpl, argoApplicationServiceExtendedImpl)
	chartGroupEntriesRepositoryImpl := repository28.NewChartGroupEntriesRepositoryImpl(db, sugaredLogger)
	chartGroupReposotoryImpl := repository28.NewChartGroupReposotoryImpl(db, sugaredLogger)
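Note on the @@ -805 hunk above: the single NewArgoApplicationServiceExtendedServiceImpl call that previously received every concrete dependency is split into a base NewArgoApplicationServiceImpl (which now also takes deploymentConfigServiceImpl) plus a thin extended constructor that only wraps the base service with argoClientWrapperServiceImpl. A minimal, self-contained sketch of that wrap-the-base pattern; the type and field names here are illustrative assumptions, not devtron's actual signatures:

```go
package main

import "fmt"

// Hypothetical stand-ins for the devtron types; only the construction
// pattern mirrors the hunk above.
type baseService struct{ deps string }

func newBaseService(deps string) *baseService { return &baseService{deps: deps} }

// The extended service embeds the base instead of re-declaring all of the
// base service's dependencies, and adds exactly one extra collaborator.
type extendedService struct {
	*baseService
	acdClient string
}

func newExtendedService(base *baseService, acdClient string) *extendedService {
	return &extendedService{baseService: base, acdClient: acdClient}
}

func main() {
	base := newBaseService("clusterRepo, k8sClient, helmClient, ...")
	ext := newExtendedService(base, "argoClientWrapper")
	fmt.Println(ext.deps, "+", ext.acdClient)
}
```

Keeping the extended constructor down to (base, acdClient) means future dependencies of the base service no longer ripple into the extended variant's signature.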
@@ -875,8 +888,8 @@ func InitializeApp() (*App, error) {
	imageScanRouterImpl := router.NewImageScanRouterImpl(imageScanRestHandlerImpl)
	policyRestHandlerImpl := restHandler.NewPolicyRestHandlerImpl(sugaredLogger, policyServiceImpl, userServiceImpl, userAuthServiceImpl, enforcerImpl, enforcerUtilImpl, environmentServiceImpl)
	policyRouterImpl := router.NewPolicyRouterImpl(policyRestHandlerImpl)
-	gitOpsConfigServiceImpl := gitops.NewGitOpsConfigServiceImpl(sugaredLogger, gitOpsConfigRepositoryImpl, k8sServiceImpl, acdAuthConfig, clusterServiceImplExtended, gitOperationServiceImpl, gitOpsConfigReadServiceImpl, gitOpsValidationServiceImpl, certificateServiceClientImpl, repositoryServiceClientImpl, environmentVariables, argoCDConnectionManagerImpl, argoCDConfigGetterImpl, argoClientWrapperServiceImpl, clusterReadServiceImpl)
-	gitOpsConfigRestHandlerImpl := restHandler.NewGitOpsConfigRestHandlerImpl(sugaredLogger, gitOpsConfigServiceImpl, userServiceImpl, validate, enforcerImpl, teamServiceImpl)
+	gitOpsConfigServiceImpl := gitops.NewGitOpsConfigServiceImpl(sugaredLogger, gitOpsConfigRepositoryImpl, k8sServiceImpl, acdAuthConfig, clusterServiceImplExtended, gitOperationServiceImpl, gitOpsConfigReadServiceImpl, gitOpsValidationServiceImpl, certificateServiceClientImpl, repositoryServiceClientImpl, environmentVariables, argoCDConnectionManagerImpl, argoCDConfigGetterImpl, argoClientWrapperServiceImpl, clusterReadServiceImpl, moduleReadServiceImpl)
+	gitOpsConfigRestHandlerImpl := restHandler.NewGitOpsConfigRestHandlerImpl(sugaredLogger, moduleReadServiceImpl, gitOpsConfigServiceImpl, userServiceImpl, validate, enforcerImpl, teamServiceImpl)
	gitOpsConfigRouterImpl := router.NewGitOpsConfigRouterImpl(gitOpsConfigRestHandlerImpl)
	dashboardConfig, err := dashboard.GetConfig()
	if err != nil {
@@ -892,8 +905,8 @@ func InitializeApp() (*App, error) {
	userAttributesServiceImpl := attributes.NewUserAttributesServiceImpl(sugaredLogger, userAttributesRepositoryImpl)
	userAttributesRestHandlerImpl := restHandler.NewUserAttributesRestHandlerImpl(sugaredLogger, enforcerImpl, userServiceImpl, userAttributesServiceImpl)
	userAttributesRouterImpl := router.NewUserAttributesRouterImpl(userAttributesRestHandlerImpl)
-	commonRestHanlderImpl := restHandler.NewCommonRestHanlderImpl(sugaredLogger, userServiceImpl, commonServiceImpl)
-	commonRouterImpl := router.NewCommonRouterImpl(commonRestHanlderImpl)
+	commonRestHandlerImpl := restHandler.NewCommonRestHandlerImpl(sugaredLogger, userServiceImpl, commonServiceImpl)
+	commonRouterImpl := router.NewCommonRouterImpl(commonRestHandlerImpl)
	grafanaConfig, err := grafana.GetConfig()
	if err != nil {
		return nil, err
	}
@@ -930,7 +943,7 @@ func InitializeApp() (*App, error) {
	webhookListenerRouterImpl := router.NewWebhookListenerRouterImpl(webhookEventHandlerImpl)
	appFilteringRestHandlerImpl := appList.NewAppFilteringRestHandlerImpl(sugaredLogger, teamServiceImpl, enforcerImpl, userServiceImpl, clusterServiceImplExtended, environmentServiceImpl, teamReadServiceImpl)
	appFilteringRouterImpl := appList2.NewAppFilteringRouterImpl(appFilteringRestHandlerImpl)
-	serviceImpl := resourceTree.NewServiceImpl(sugaredLogger, appListingServiceImpl, appStatusServiceImpl, argoApplicationServiceExtendedImpl, cdApplicationStatusUpdateHandlerImpl, helmAppReadServiceImpl, helmAppServiceImpl, k8sApplicationServiceImpl, k8sCommonServiceImpl)
+	serviceImpl := resourceTree.NewServiceImpl(sugaredLogger, appListingServiceImpl, appStatusServiceImpl, argoApplicationServiceExtendedImpl, cdApplicationStatusUpdateHandlerImpl, helmAppReadServiceImpl, helmAppServiceImpl, k8sApplicationServiceImpl, k8sCommonServiceImpl, environmentReadServiceImpl)
	appListingRestHandlerImpl := appList.NewAppListingRestHandlerImpl(appListingServiceImpl, enforcerImpl, pipelineBuilderImpl, sugaredLogger, enforcerUtilImpl, deploymentGroupServiceImpl, userServiceImpl, k8sCommonServiceImpl, installedAppDBExtendedServiceImpl, installedAppResourceServiceImpl, pipelineRepositoryImpl, k8sApplicationServiceImpl, deploymentConfigServiceImpl, serviceImpl)
	appListingRouterImpl := appList2.NewAppListingRouterImpl(appListingRestHandlerImpl)
	appInfoRestHandlerImpl := appInfo.NewAppInfoRestHandlerImpl(sugaredLogger, appCrudOperationServiceImpl, userServiceImpl, validate, enforcerUtilImpl, enforcerImpl, helmAppServiceImpl, enforcerUtilHelmImpl, genericNoteServiceImpl)
@@ -952,11 +965,11 @@ func InitializeApp() (*App, error) {
	devtronAppAutoCompleteRestHandlerImpl := pipeline3.NewDevtronAppAutoCompleteRestHandlerImpl(sugaredLogger, userServiceImpl, teamServiceImpl, enforcerImpl, enforcerUtilImpl, devtronAppConfigServiceImpl, environmentServiceImpl, dockerRegistryConfigImpl, gitProviderReadServiceImpl)
	devtronAppAutoCompleteRouterImpl := pipeline4.NewDevtronAppAutoCompleteRouterImpl(devtronAppAutoCompleteRestHandlerImpl)
	appRouterImpl := app3.NewAppRouterImpl(appFilteringRouterImpl, appListingRouterImpl, appInfoRouterImpl, pipelineTriggerRouterImpl, pipelineConfigRouterImpl, pipelineHistoryRouterImpl, pipelineStatusRouterImpl, appWorkflowRouterImpl, devtronAppAutoCompleteRouterImpl, appWorkflowRestHandlerImpl, appListingRestHandlerImpl, appFilteringRestHandlerImpl)
-	coreAppRestHandlerImpl := restHandler.NewCoreAppRestHandlerImpl(sugaredLogger, userServiceImpl, validate, enforcerUtilImpl, enforcerImpl, appCrudOperationServiceImpl, pipelineBuilderImpl, gitRegistryConfigImpl, chartServiceImpl, configMapServiceImpl, appListingServiceImpl, propertiesConfigServiceImpl, appWorkflowServiceImpl, appWorkflowRepositoryImpl, environmentRepositoryImpl, configMapRepositoryImpl, chartRepositoryImpl, teamServiceImpl, pipelineStageServiceImpl, ciPipelineRepositoryImpl, gitProviderReadServiceImpl, gitMaterialReadServiceImpl, teamReadServiceImpl)
+	coreAppRestHandlerImpl := restHandler.NewCoreAppRestHandlerImpl(sugaredLogger, userServiceImpl, validate, enforcerUtilImpl, enforcerImpl, appCrudOperationServiceImpl, pipelineBuilderImpl, gitRegistryConfigImpl, chartServiceImpl, configMapServiceImpl, appListingServiceImpl, propertiesConfigServiceImpl, appWorkflowServiceImpl, appWorkflowRepositoryImpl, environmentRepositoryImpl, configMapRepositoryImpl, chartRepositoryImpl, teamServiceImpl, pipelineStageServiceImpl, ciPipelineRepositoryImpl, gitProviderReadServiceImpl, gitMaterialReadServiceImpl, teamReadServiceImpl, chartReadServiceImpl)
	coreAppRouterImpl := router.NewCoreAppRouterImpl(coreAppRestHandlerImpl)
	helmAppRestHandlerImpl := client3.NewHelmAppRestHandlerImpl(sugaredLogger, helmAppServiceImpl, enforcerImpl, clusterServiceImplExtended, enforcerUtilHelmImpl, appStoreDeploymentServiceImpl, installedAppDBServiceImpl, userServiceImpl, attributesServiceImpl, serverEnvConfigServerEnvConfig, fluxApplicationServiceImpl, argoApplicationServiceExtendedImpl)
	helmAppRouterImpl := client3.NewHelmAppRouterImpl(helmAppRestHandlerImpl)
-	argoApplicationReadServiceImpl := read17.NewArgoApplicationReadServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, helmAppClientImpl, helmAppServiceImpl)
+	argoApplicationReadServiceImpl := read22.NewArgoApplicationReadServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, helmAppClientImpl, helmAppServiceImpl)
	k8sApplicationRestHandlerImpl := application3.NewK8sApplicationRestHandlerImpl(sugaredLogger, k8sApplicationServiceImpl, pumpImpl, terminalSessionHandlerImpl, enforcerImpl, enforcerUtilHelmImpl, enforcerUtilImpl, helmAppServiceImpl, userServiceImpl, k8sCommonServiceImpl, validate, environmentVariables, fluxApplicationServiceImpl, argoApplicationReadServiceImpl)
	k8sApplicationRouterImpl := application3.NewK8sApplicationRouterImpl(k8sApplicationRestHandlerImpl)
	pProfRestHandlerImpl := restHandler.NewPProfRestHandler(userServiceImpl, enforcerImpl)
@@ -1034,7 +1047,7 @@ func InitializeApp() (*App, error) {
	if err != nil {
		return nil, err
	}
-	deploymentConfigurationServiceImpl, err := configDiff.NewDeploymentConfigurationServiceImpl(sugaredLogger, configMapServiceImpl, appRepositoryImpl, environmentRepositoryImpl, chartServiceImpl, generateManifestDeploymentTemplateServiceImpl, deploymentTemplateHistoryRepositoryImpl, pipelineStrategyHistoryRepositoryImpl, configMapHistoryRepositoryImpl, scopedVariableCMCSManagerImpl, configMapRepositoryImpl, pipelineDeploymentConfigServiceImpl, chartRefServiceImpl, pipelineRepositoryImpl, configMapHistoryServiceImpl, deploymentTemplateHistoryReadServiceImpl, configMapHistoryReadServiceImpl, cdWorkflowRepositoryImpl, envConfigOverrideReadServiceImpl, chartTemplateServiceImpl, helmAppClientImpl, helmAppServiceImpl, k8sServiceImpl, mergeUtil, helmAppReadServiceImpl)
+	deploymentConfigurationServiceImpl, err := configDiff.NewDeploymentConfigurationServiceImpl(sugaredLogger, configMapServiceImpl, appRepositoryImpl, environmentRepositoryImpl, chartServiceImpl, generateManifestDeploymentTemplateServiceImpl, deploymentTemplateHistoryRepositoryImpl, pipelineStrategyHistoryRepositoryImpl, configMapHistoryRepositoryImpl, scopedVariableCMCSManagerImpl, configMapRepositoryImpl, pipelineDeploymentConfigServiceImpl, chartRefServiceImpl, pipelineRepositoryImpl, configMapHistoryServiceImpl, deploymentTemplateHistoryReadServiceImpl, configMapHistoryReadServiceImpl, cdWorkflowRepositoryImpl, envConfigOverrideReadServiceImpl, chartTemplateServiceImpl, helmAppClientImpl, helmAppServiceImpl, k8sServiceImpl, mergeUtil, helmAppReadServiceImpl, chartReadServiceImpl)
	if err != nil {
		return nil, err
	}
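Note on the recurring chartReadServiceImpl argument (added above to NewPipelineRestHandlerImpl, NewCoreAppRestHandlerImpl, and NewDeploymentConfigurationServiceImpl): each of these consumers is handed a dedicated chart read service alongside the full chartServiceImpl, consistent with query-only chart operations being split out into their own service. A rough, self-contained sketch of that read/write split; the interface and method below are illustrative assumptions, not devtron's real API:

```go
package main

import "fmt"

// Assumed shape of the split: a narrow read-only interface extracted from a
// wider chart service, so query-only consumers stop depending on mutators.
type chartReadService interface {
	FindLatestChart(appID int) (string, error)
}

type chartReadServiceImpl struct{}

func (c *chartReadServiceImpl) FindLatestChart(appID int) (string, error) {
	return fmt.Sprintf("chart-for-app-%d", appID), nil
}

// Consumers accept only the read interface, mirroring how the constructors
// in this diff now take chartReadServiceImpl alongside chartServiceImpl.
type deploymentConfigurationService struct{ chartRead chartReadService }

func newDeploymentConfigurationService(chartRead chartReadService) *deploymentConfigurationService {
	return &deploymentConfigurationService{chartRead: chartRead}
}

func main() {
	svc := newDeploymentConfigurationService(&chartReadServiceImpl{})
	chart, err := svc.chartRead.FindLatestChart(42)
	fmt.Println(chart, err)
}
```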
@@ -1056,7 +1069,7 @@ func InitializeApp() (*App, error) {
	muxRouter := router.NewMuxRouter(sugaredLogger, environmentRouterImpl, clusterRouterImpl, webhookRouterImpl, userAuthRouterImpl, gitProviderRouterImpl, gitHostRouterImpl, dockerRegRouterImpl, notificationRouterImpl, teamRouterImpl, userRouterImpl, chartRefRouterImpl, configMapRouterImpl, appStoreRouterImpl, chartRepositoryRouterImpl, releaseMetricsRouterImpl, deploymentGroupRouterImpl, batchOperationRouterImpl, chartGroupRouterImpl, imageScanRouterImpl, policyRouterImpl, gitOpsConfigRouterImpl, dashboardRouterImpl, attributesRouterImpl, userAttributesRouterImpl, commonRouterImpl, grafanaRouterImpl, ssoLoginRouterImpl, telemetryRouterImpl, telemetryEventClientImplExtended, bulkUpdateRouterImpl, webhookListenerRouterImpl, appRouterImpl, coreAppRouterImpl, helmAppRouterImpl, k8sApplicationRouterImpl, pProfRouterImpl, deploymentConfigRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, globalPluginRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, cdApplicationStatusUpdateHandlerImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, globalCMCSRouterImpl, userTerminalAccessRouterImpl, jobRouterImpl, ciStatusUpdateCronImpl, resourceGroupingRouterImpl, rbacRoleRouterImpl, scopedVariableRouterImpl, ciTriggerCronImpl, proxyRouterImpl, deploymentConfigurationRouterImpl, infraConfigRouterImpl, argoApplicationRouterImpl, devtronResourceRouterImpl, fluxApplicationRouterImpl, scanningResultRouterImpl)
	loggingMiddlewareImpl := util4.NewLoggingMiddlewareImpl(userServiceImpl)
	cdWorkflowServiceImpl := cd.NewCdWorkflowServiceImpl(sugaredLogger, cdWorkflowRepositoryImpl)
-	cdWorkflowRunnerReadServiceImpl := read15.NewCdWorkflowRunnerReadServiceImpl(sugaredLogger, cdWorkflowRepositoryImpl)
+	cdWorkflowRunnerReadServiceImpl := read20.NewCdWorkflowRunnerReadServiceImpl(sugaredLogger, cdWorkflowRepositoryImpl)
	webhookServiceImpl := pipeline.NewWebhookServiceImpl(ciArtifactRepositoryImpl, sugaredLogger, ciPipelineRepositoryImpl, ciWorkflowRepositoryImpl, cdWorkflowCommonServiceImpl, workFlowStageStatusServiceImpl, ciServiceImpl)
	workflowEventProcessorImpl, err := in.NewWorkflowEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, cdWorkflowServiceImpl, cdWorkflowReadServiceImpl, cdWorkflowRunnerServiceImpl, cdWorkflowRunnerReadServiceImpl, workflowDagExecutorImpl, ciHandlerImpl, cdHandlerImpl, eventSimpleFactoryImpl, eventRESTClientImpl, triggerServiceImpl, deployedAppServiceImpl, webhookServiceImpl, validate, environmentVariables, cdWorkflowCommonServiceImpl, cdPipelineConfigServiceImpl, userDeploymentRequestServiceImpl, pipelineRepositoryImpl, ciArtifactRepositoryImpl, cdWorkflowRepositoryImpl, deploymentConfigServiceImpl)
	if err != nil {
@@ -1064,7 +1077,7 @@ func InitializeApp() (*App, error) {
	}
	ciPipelineEventProcessorImpl := in.NewCIPipelineEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, gitWebhookServiceImpl)
	cdPipelineEventProcessorImpl := in.NewCDPipelineEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, cdWorkflowCommonServiceImpl, workflowStatusServiceImpl, triggerServiceImpl, pipelineRepositoryImpl, installedAppReadServiceImpl)
-	deployedApplicationEventProcessorImpl := in.NewDeployedApplicationEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, appServiceImpl, gitOpsConfigReadServiceImpl, installedAppDBExtendedServiceImpl, workflowDagExecutorImpl, cdWorkflowCommonServiceImpl, pipelineBuilderImpl, appStoreDeploymentServiceImpl, pipelineRepositoryImpl, installedAppReadServiceImpl)
+	deployedApplicationEventProcessorImpl := in.NewDeployedApplicationEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, appServiceImpl, gitOpsConfigReadServiceImpl, installedAppDBExtendedServiceImpl, workflowDagExecutorImpl, cdWorkflowCommonServiceImpl, pipelineBuilderImpl, appStoreDeploymentServiceImpl, pipelineRepositoryImpl, installedAppReadServiceImpl, deploymentConfigServiceImpl)
	appStoreAppsEventProcessorImpl := in.NewAppStoreAppsEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, chartGroupServiceImpl, installedAppVersionHistoryRepositoryImpl)
	centralEventProcessor, err := eventProcessor.NewCentralEventProcessor(sugaredLogger, workflowEventProcessorImpl, ciPipelineEventProcessorImpl, cdPipelineEventProcessorImpl, deployedApplicationEventProcessorImpl, appStoreAppsEventProcessorImpl)
	if err != nil {