From 6c2bc2a916c38657af73592ee6f51eee530221ba Mon Sep 17 00:00:00 2001 From: andrewhibbert Date: Tue, 11 Mar 2025 18:49:55 +0000 Subject: [PATCH 01/10] feat: Add support to enable backoff when rate limited by the Github API Signed-off-by: andrewhibbert --- pkg/scalers/github_runner_scaler.go | 69 ++++++++++++++++++++++++++--- 1 file changed, 63 insertions(+), 6 deletions(-) diff --git a/pkg/scalers/github_runner_scaler.go b/pkg/scalers/github_runner_scaler.go index ec748eddbae..6f64a562dd5 100644 --- a/pkg/scalers/github_runner_scaler.go +++ b/pkg/scalers/github_runner_scaler.go @@ -38,6 +38,7 @@ type githubRunnerScaler struct { previousRepos []string previousWfrs map[string]map[string]*WorkflowRuns previousJobs map[string][]Job + rateLimits RateLimits } type githubRunnerMetadata struct { @@ -49,6 +50,7 @@ type githubRunnerMetadata struct { labels []string noDefaultLabels bool enableEtags bool + enableBackoff bool targetWorkflowQueueLength int64 triggerIndex int applicationID *int64 @@ -331,6 +333,12 @@ type Job struct { HeadBranch string `json:"head_branch"` } +type RateLimits struct { + Remaining int `json:"remaining"` + ResetTime time.Time `json:"resetTime"` + RetryAfterTime time.Time `json:"retryAfterTime"` +} + // NewGitHubRunnerScaler creates a new GitHub Runner Scaler func NewGitHubRunnerScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false) @@ -359,6 +367,7 @@ func NewGitHubRunnerScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { previousRepos := []string{} previousJobs := make(map[string][]Job) previousWfrs := make(map[string]map[string]*WorkflowRuns) + rateLimits := RateLimits{} return &githubRunnerScaler{ metricType: metricType, @@ -369,6 +378,7 @@ func NewGitHubRunnerScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { previousRepos: previousRepos, previousJobs: previousJobs, previousWfrs: previousWfrs, + rateLimits: rateLimits, }, nil } @@ -452,6 +462,12 @@ 
func parseGitHubRunnerMetadata(config *scalersconfig.ScalerConfig) (*githubRunne meta.enableEtags = false } + if val, err := getBoolValueFromMetaOrEnv("enableBackoff", config.TriggerMetadata, config.ResolvedEnv); err == nil { + meta.enableBackoff = val + } else { + meta.enableBackoff = false + } + if val, err := getValueFromMetaOrEnv("repos", config.TriggerMetadata, config.ResolvedEnv); err == nil && val != "" { meta.repos = strings.Split(val, ",") } @@ -579,7 +595,38 @@ func (s *githubRunnerScaler) getRepositories(ctx context.Context) ([]string, err return repoList, nil } +func (s *githubRunnerScaler) getRateLimits(header http.Header) RateLimits { + retryAfterTime := time.Time{} + + remaining, _ := strconv.Atoi(header.Get("X-RateLimit-Remaining")) + reset, _ := strconv.ParseInt(header.Get("X-RateLimit-Reset"), 10, 64) + resetTime := time.Unix(reset, 0) + + if header.Get("retry-after") != "" { + retryAfter, _ := strconv.Atoi(header.Get("retry-after")) + retryAfterTime = time.Now().Add(time.Duration(retryAfter) * time.Second) + } + + return RateLimits{ + Remaining: remaining, + ResetTime: resetTime, + RetryAfterTime: retryAfterTime, + } +} + func (s *githubRunnerScaler) getGithubRequest(ctx context.Context, url string, metadata *githubRunnerMetadata, httpClient *http.Client) ([]byte, int, error) { + + if s.metadata.enableBackoff { + if s.rateLimits.Remaining == 0 && !s.rateLimits.ResetTime.IsZero() && time.Now().Before(s.rateLimits.ResetTime) { + return []byte{}, http.StatusForbidden, fmt.Errorf("GitHub API rate limit exceeded, will backoff until reset time %s", s.rateLimits.ResetTime) + } + + if !s.rateLimits.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimits.RetryAfterTime) { + return []byte{}, http.StatusForbidden, fmt.Errorf("GitHub API rate limit exceeded, will backoff until retry after time %s", s.rateLimits.RetryAfterTime) + } + + } + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) if err != nil { return []byte{}, -1, err @@ -609,19 
+656,29 @@ func (s *githubRunnerScaler) getGithubRequest(ctx context.Context, url string, m } _ = r.Body.Close() + var rateLimits RateLimits + if r.Header.Get("X-RateLimit-Remaining") != "" { + rateLimits := s.getRateLimits(r.Header) + s.logger.V(0).Info(fmt.Sprintf("GitHub API rate limits: remaining %d, reset at %s, retry after %s", rateLimits.Remaining, rateLimits.ResetTime, rateLimits.RetryAfterTime)) + + if s.metadata.enableBackoff { + s.rateLimits = rateLimits + } + } + if r.StatusCode != 200 { + if r.StatusCode == 304 && s.metadata.enableEtags { s.logger.V(1).Info(fmt.Sprintf("The github rest api for the url: %s returned status %d %s", url, r.StatusCode, http.StatusText(r.StatusCode))) return []byte{}, r.StatusCode, nil } - if r.Header.Get("X-RateLimit-Remaining") != "" { - githubAPIRemaining, _ := strconv.Atoi(r.Header.Get("X-RateLimit-Remaining")) + if rateLimits.Remaining == 0 && !rateLimits.ResetTime.IsZero() { + return []byte{}, r.StatusCode, fmt.Errorf("GitHub API rate limit exceeded, reset time %s", rateLimits.ResetTime) + } - if githubAPIRemaining == 0 { - resetTime, _ := strconv.ParseInt(r.Header.Get("X-RateLimit-Reset"), 10, 64) - return []byte{}, r.StatusCode, fmt.Errorf("GitHub API rate limit exceeded, resets at %s", time.Unix(resetTime, 0)) - } + if time.Now().Before(rateLimits.RetryAfterTime) && !rateLimits.RetryAfterTime.IsZero() { + return []byte{}, r.StatusCode, fmt.Errorf("GitHub API rate limit exceeded, retry after %s", rateLimits.RetryAfterTime) } return []byte{}, r.StatusCode, fmt.Errorf("the GitHub REST API returned error. 
url: %s status: %d response: %s", url, r.StatusCode, string(b)) From dbf45833854e0594bc666bf49221ddbf5a1bebf6 Mon Sep 17 00:00:00 2001 From: andrewhibbert Date: Mon, 17 Mar 2025 19:59:37 +0000 Subject: [PATCH 02/10] feat: Add support to enable backoff when rate limited by the Github API Signed-off-by: andrewhibbert --- pkg/scalers/github_runner_scaler.go | 42 ++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/pkg/scalers/github_runner_scaler.go b/pkg/scalers/github_runner_scaler.go index 6f64a562dd5..22a54c55cd9 100644 --- a/pkg/scalers/github_runner_scaler.go +++ b/pkg/scalers/github_runner_scaler.go @@ -615,18 +615,6 @@ func (s *githubRunnerScaler) getRateLimits(header http.Header) RateLimits { } func (s *githubRunnerScaler) getGithubRequest(ctx context.Context, url string, metadata *githubRunnerMetadata, httpClient *http.Client) ([]byte, int, error) { - - if s.metadata.enableBackoff { - if s.rateLimits.Remaining == 0 && !s.rateLimits.ResetTime.IsZero() && time.Now().Before(s.rateLimits.ResetTime) { - return []byte{}, http.StatusForbidden, fmt.Errorf("GitHub API rate limit exceeded, will backoff until reset time %s", s.rateLimits.ResetTime) - } - - if !s.rateLimits.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimits.RetryAfterTime) { - return []byte{}, http.StatusForbidden, fmt.Errorf("GitHub API rate limit exceeded, will backoff until retry after time %s", s.rateLimits.RetryAfterTime) - } - - } - req, err := http.NewRequestWithContext(ctx, "GET", url, nil) if err != nil { return []byte{}, -1, err @@ -838,8 +826,36 @@ func (s *githubRunnerScaler) GetWorkflowQueueLength(ctx context.Context) (int64, } func (s *githubRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { - queueLen, err := s.GetWorkflowQueueLength(ctx) + if s.metadata.enableBackoff { + if s.rateLimits.Remaining == 0 && !s.rateLimits.ResetTime.IsZero() && 
time.Now().Before(s.rateLimits.ResetTime) { + reset := time.Until(s.rateLimits.ResetTime) + s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, resets at %s, waiting for %s", s.rateLimits.ResetTime, reset)) + + // Use context-aware delay + select { + case <-ctx.Done(): + return nil, false, ctx.Err() // Return if the context is canceled + case <-time.After(reset): + // Wait for reset time, then proceed + } + } + if !s.rateLimits.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimits.RetryAfterTime) { + retry := time.Until(s.rateLimits.RetryAfterTime) + s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, retry after %s, waiting for %s", s.rateLimits.ResetTime, retry)) + + // Use context-aware delay + select { + case <-ctx.Done(): + return nil, false, ctx.Err() // Return if the context is canceled + case <-time.After(retry): + // Wait for retry time, then proceed + } + } + + } + + queueLen, err := s.GetWorkflowQueueLength(ctx) if err != nil { s.logger.Error(err, "error getting workflow queue length") return []external_metrics.ExternalMetricValue{}, false, err From 6f3793273bd6bc835d6672aabe7cfb178f770d35 Mon Sep 17 00:00:00 2001 From: andrewhibbert Date: Fri, 21 Mar 2025 11:35:21 +0000 Subject: [PATCH 03/10] feat: Add support to enable backoff when rate limited by the Github API Signed-off-by: andrewhibbert --- pkg/scalers/github_runner_scaler.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/pkg/scalers/github_runner_scaler.go b/pkg/scalers/github_runner_scaler.go index 22a54c55cd9..55a78a82f6a 100644 --- a/pkg/scalers/github_runner_scaler.go +++ b/pkg/scalers/github_runner_scaler.go @@ -596,7 +596,7 @@ func (s *githubRunnerScaler) getRepositories(ctx context.Context) ([]string, err } func (s *githubRunnerScaler) getRateLimits(header http.Header) RateLimits { - retryAfterTime := time.Time{} + retryAfterTime := time.Now() remaining, _ := strconv.Atoi(header.Get("X-RateLimit-Remaining")) reset, _ := 
strconv.ParseInt(header.Get("X-RateLimit-Reset"), 10, 64) @@ -647,7 +647,7 @@ func (s *githubRunnerScaler) getGithubRequest(ctx context.Context, url string, m var rateLimits RateLimits if r.Header.Get("X-RateLimit-Remaining") != "" { rateLimits := s.getRateLimits(r.Header) - s.logger.V(0).Info(fmt.Sprintf("GitHub API rate limits: remaining %d, reset at %s, retry after %s", rateLimits.Remaining, rateLimits.ResetTime, rateLimits.RetryAfterTime)) + s.logger.V(0).Info(fmt.Sprintf("GitHub API rate limits: remaining %d, retry after %s, reset time %s", rateLimits.Remaining, rateLimits.RetryAfterTime, rateLimits.ResetTime)) if s.metadata.enableBackoff { s.rateLimits = rateLimits @@ -665,7 +665,7 @@ func (s *githubRunnerScaler) getGithubRequest(ctx context.Context, url string, m return []byte{}, r.StatusCode, fmt.Errorf("GitHub API rate limit exceeded, reset time %s", rateLimits.ResetTime) } - if time.Now().Before(rateLimits.RetryAfterTime) && !rateLimits.RetryAfterTime.IsZero() { + if !rateLimits.RetryAfterTime.IsZero() && time.Now().Before(rateLimits.RetryAfterTime) { return []byte{}, r.StatusCode, fmt.Errorf("GitHub API rate limit exceeded, retry after %s", rateLimits.RetryAfterTime) } @@ -831,7 +831,6 @@ func (s *githubRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricNa reset := time.Until(s.rateLimits.ResetTime) s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, resets at %s, waiting for %s", s.rateLimits.ResetTime, reset)) - // Use context-aware delay select { case <-ctx.Done(): return nil, false, ctx.Err() // Return if the context is canceled @@ -844,7 +843,6 @@ func (s *githubRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricNa retry := time.Until(s.rateLimits.RetryAfterTime) s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, retry after %s, waiting for %s", s.rateLimits.ResetTime, retry)) - // Use context-aware delay select { case <-ctx.Done(): return nil, false, ctx.Err() // Return if the context is canceled From 
f5fc28a970a898aebc8bdbece5eaab7346fdbc45 Mon Sep 17 00:00:00 2001 From: andrewhibbert Date: Fri, 21 Mar 2025 14:14:59 +0000 Subject: [PATCH 04/10] feat: Add support to enable backoff when rate limited by the Github API Signed-off-by: andrewhibbert --- pkg/scalers/github_runner_scaler.go | 67 ++++++++++++------------ pkg/scalers/github_runner_scaler_test.go | 1 + 2 files changed, 35 insertions(+), 33 deletions(-) diff --git a/pkg/scalers/github_runner_scaler.go b/pkg/scalers/github_runner_scaler.go index 55a78a82f6a..03c77d0b2d4 100644 --- a/pkg/scalers/github_runner_scaler.go +++ b/pkg/scalers/github_runner_scaler.go @@ -644,14 +644,9 @@ func (s *githubRunnerScaler) getGithubRequest(ctx context.Context, url string, m } _ = r.Body.Close() - var rateLimits RateLimits if r.Header.Get("X-RateLimit-Remaining") != "" { - rateLimits := s.getRateLimits(r.Header) - s.logger.V(0).Info(fmt.Sprintf("GitHub API rate limits: remaining %d, retry after %s, reset time %s", rateLimits.Remaining, rateLimits.RetryAfterTime, rateLimits.ResetTime)) - - if s.metadata.enableBackoff { - s.rateLimits = rateLimits - } + s.rateLimits = s.getRateLimits(r.Header) + s.logger.V(0).Info(fmt.Sprintf("GitHub API rate limits: remaining %d, retry after %s, reset time %s", s.rateLimits.Remaining, s.rateLimits.RetryAfterTime, s.rateLimits.ResetTime)) } if r.StatusCode != 200 { @@ -661,12 +656,12 @@ func (s *githubRunnerScaler) getGithubRequest(ctx context.Context, url string, m return []byte{}, r.StatusCode, nil } - if rateLimits.Remaining == 0 && !rateLimits.ResetTime.IsZero() { - return []byte{}, r.StatusCode, fmt.Errorf("GitHub API rate limit exceeded, reset time %s", rateLimits.ResetTime) + if s.rateLimits.Remaining == 0 && !s.rateLimits.ResetTime.IsZero() { + return []byte{}, r.StatusCode, fmt.Errorf("GitHub API rate limit exceeded, reset time %s", s.rateLimits.ResetTime) } - if !rateLimits.RetryAfterTime.IsZero() && time.Now().Before(rateLimits.RetryAfterTime) { - return []byte{}, r.StatusCode, 
fmt.Errorf("GitHub API rate limit exceeded, retry after %s", rateLimits.RetryAfterTime) + if !s.rateLimits.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimits.RetryAfterTime) { + return []byte{}, r.StatusCode, fmt.Errorf("GitHub API rate limit exceeded, retry after %s", s.rateLimits.RetryAfterTime) } return []byte{}, r.StatusCode, fmt.Errorf("the GitHub REST API returned error. url: %s status: %d response: %s", url, r.StatusCode, string(b)) @@ -825,32 +820,38 @@ func (s *githubRunnerScaler) GetWorkflowQueueLength(ctx context.Context) (int64, return queueCount, nil } -func (s *githubRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { - if s.metadata.enableBackoff { - if s.rateLimits.Remaining == 0 && !s.rateLimits.ResetTime.IsZero() && time.Now().Before(s.rateLimits.ResetTime) { - reset := time.Until(s.rateLimits.ResetTime) - s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, resets at %s, waiting for %s", s.rateLimits.ResetTime, reset)) - - select { - case <-ctx.Done(): - return nil, false, ctx.Err() // Return if the context is canceled - case <-time.After(reset): - // Wait for reset time, then proceed - } +// waitForRateLimitReset waits until the rate limit reset time or retry-after time is reached. 
+func (s *githubRunnerScaler) waitForRateLimitReset(ctx context.Context) error { + if s.rateLimits.Remaining == 0 && !s.rateLimits.ResetTime.IsZero() && time.Now().Before(s.rateLimits.ResetTime) { + reset := time.Until(s.rateLimits.ResetTime) + s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, resets at %s, waiting for %s", s.rateLimits.ResetTime, reset)) + + select { + case <-ctx.Done(): + return ctx.Err() // Return if the context is canceled + case <-time.After(reset): } + } - if !s.rateLimits.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimits.RetryAfterTime) { - retry := time.Until(s.rateLimits.RetryAfterTime) - s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, retry after %s, waiting for %s", s.rateLimits.ResetTime, retry)) - - select { - case <-ctx.Done(): - return nil, false, ctx.Err() // Return if the context is canceled - case <-time.After(retry): - // Wait for retry time, then proceed - } + if !s.rateLimits.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimits.RetryAfterTime) { + retry := time.Until(s.rateLimits.RetryAfterTime) + s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, retry after %s, waiting for %s", s.rateLimits.ResetTime, retry)) + + select { + case <-ctx.Done(): + return ctx.Err() // Return if the context is canceled + case <-time.After(retry): } + } + return nil +} + +func (s *githubRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + if s.metadata.enableBackoff { + if err := s.waitForRateLimitReset(ctx); err != nil { + return nil, false, err + } } queueLen, err := s.GetWorkflowQueueLength(ctx) diff --git a/pkg/scalers/github_runner_scaler_test.go b/pkg/scalers/github_runner_scaler_test.go index 40ab5a8988a..30180e92088 100644 --- a/pkg/scalers/github_runner_scaler_test.go +++ b/pkg/scalers/github_runner_scaler_test.go @@ -194,6 +194,7 @@ func apiStubHandlerCustomJob(hasRateLeft bool, exceeds30Repos bool, jobResponse } else { 
w.Header().Set("X-RateLimit-Remaining", "0") w.WriteHeader(http.StatusForbidden) + return } if strings.HasSuffix(r.URL.String(), "jobs?per_page=100") { // nosemgrep: no-direct-write-to-responsewriter From b25f1febb62b0577cf44446d09a2cb7a0b0e2465 Mon Sep 17 00:00:00 2001 From: andrewhibbert Date: Fri, 21 Mar 2025 18:34:30 +0000 Subject: [PATCH 05/10] feat: Add support to enable backoff when rate limited by the Github API Signed-off-by: andrewhibbert --- pkg/scalers/github_runner_scaler.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/scalers/github_runner_scaler.go b/pkg/scalers/github_runner_scaler.go index 03c77d0b2d4..0800a7e3483 100644 --- a/pkg/scalers/github_runner_scaler.go +++ b/pkg/scalers/github_runner_scaler.go @@ -820,6 +820,9 @@ func (s *githubRunnerScaler) GetWorkflowQueueLength(ctx context.Context) (int64, return queueCount, nil } +// func (s *githubRunnerScaler) shouldWaitForRateLimit(ctx context.Context) (bool, time.Duration) +// Function should return boolean and how long to wait in time + // waitForRateLimitReset waits until the rate limit reset time or retry-after time is reached. 
func (s *githubRunnerScaler) waitForRateLimitReset(ctx context.Context) error { if s.rateLimits.Remaining == 0 && !s.rateLimits.ResetTime.IsZero() && time.Now().Before(s.rateLimits.ResetTime) { From 70ae11e927764c4bddf03aa049adda5a363a87f5 Mon Sep 17 00:00:00 2001 From: andrewhibbert Date: Mon, 24 Mar 2025 11:57:35 +0000 Subject: [PATCH 06/10] feat: Add support to enable backoff when rate limited by the Github API Signed-off-by: andrewhibbert --- pkg/scalers/github_runner_scaler.go | 69 +++++++++++------------- pkg/scalers/github_runner_scaler_test.go | 42 +++++++++++++++ 2 files changed, 72 insertions(+), 39 deletions(-) diff --git a/pkg/scalers/github_runner_scaler.go b/pkg/scalers/github_runner_scaler.go index 0800a7e3483..19553ea7a2e 100644 --- a/pkg/scalers/github_runner_scaler.go +++ b/pkg/scalers/github_runner_scaler.go @@ -38,7 +38,7 @@ type githubRunnerScaler struct { previousRepos []string previousWfrs map[string]map[string]*WorkflowRuns previousJobs map[string][]Job - rateLimits RateLimits + rateLimit RateLimit } type githubRunnerMetadata struct { @@ -333,7 +333,7 @@ type Job struct { HeadBranch string `json:"head_branch"` } -type RateLimits struct { +type RateLimit struct { Remaining int `json:"remaining"` ResetTime time.Time `json:"resetTime"` RetryAfterTime time.Time `json:"retryAfterTime"` @@ -367,7 +367,7 @@ func NewGitHubRunnerScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { previousRepos := []string{} previousJobs := make(map[string][]Job) previousWfrs := make(map[string]map[string]*WorkflowRuns) - rateLimits := RateLimits{} + rateLimit := RateLimit{} return &githubRunnerScaler{ metricType: metricType, @@ -378,7 +378,7 @@ func NewGitHubRunnerScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { previousRepos: previousRepos, previousJobs: previousJobs, previousWfrs: previousWfrs, - rateLimits: rateLimits, + rateLimit: rateLimit, }, nil } @@ -595,7 +595,7 @@ func (s *githubRunnerScaler) getRepositories(ctx context.Context) 
([]string, err return repoList, nil } -func (s *githubRunnerScaler) getRateLimits(header http.Header) RateLimits { +func (s *githubRunnerScaler) getRateLimit(header http.Header) RateLimit { retryAfterTime := time.Now() remaining, _ := strconv.Atoi(header.Get("X-RateLimit-Remaining")) @@ -607,7 +607,7 @@ func (s *githubRunnerScaler) getRateLimits(header http.Header) RateLimits { retryAfterTime = time.Now().Add(time.Duration(retryAfter) * time.Second) } - return RateLimits{ + return RateLimit{ Remaining: remaining, ResetTime: resetTime, RetryAfterTime: retryAfterTime, @@ -645,23 +645,22 @@ func (s *githubRunnerScaler) getGithubRequest(ctx context.Context, url string, m _ = r.Body.Close() if r.Header.Get("X-RateLimit-Remaining") != "" { - s.rateLimits = s.getRateLimits(r.Header) - s.logger.V(0).Info(fmt.Sprintf("GitHub API rate limits: remaining %d, retry after %s, reset time %s", s.rateLimits.Remaining, s.rateLimits.RetryAfterTime, s.rateLimits.ResetTime)) + s.rateLimit = s.getRateLimit(r.Header) + s.logger.V(0).Info(fmt.Sprintf("GitHub API rate limits: remaining %d, retry after %s, reset time %s", s.rateLimit.Remaining, s.rateLimit.RetryAfterTime, s.rateLimit.ResetTime)) } if r.StatusCode != 200 { - if r.StatusCode == 304 && s.metadata.enableEtags { s.logger.V(1).Info(fmt.Sprintf("The github rest api for the url: %s returned status %d %s", url, r.StatusCode, http.StatusText(r.StatusCode))) return []byte{}, r.StatusCode, nil } - if s.rateLimits.Remaining == 0 && !s.rateLimits.ResetTime.IsZero() { - return []byte{}, r.StatusCode, fmt.Errorf("GitHub API rate limit exceeded, reset time %s", s.rateLimits.ResetTime) + if s.rateLimit.Remaining == 0 && !s.rateLimit.ResetTime.IsZero() { + return []byte{}, r.StatusCode, fmt.Errorf("GitHub API rate limit exceeded, reset time %s", s.rateLimit.ResetTime) } - if !s.rateLimits.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimits.RetryAfterTime) { - return []byte{}, r.StatusCode, fmt.Errorf("GitHub API rate limit exceeded, 
retry after %s", s.rateLimits.RetryAfterTime) + if !s.rateLimit.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimit.RetryAfterTime) { + return []byte{}, r.StatusCode, fmt.Errorf("GitHub API rate limit exceeded, retry after %s", s.rateLimit.RetryAfterTime) } return []byte{}, r.StatusCode, fmt.Errorf("the GitHub REST API returned error. url: %s status: %d response: %s", url, r.StatusCode, string(b)) @@ -820,40 +819,32 @@ func (s *githubRunnerScaler) GetWorkflowQueueLength(ctx context.Context) (int64, return queueCount, nil } -// func (s *githubRunnerScaler) shouldWaitForRateLimit(ctx context.Context) (bool, time.Duration) -// Function should return boolean and how long to wait in time - -// waitForRateLimitReset waits until the rate limit reset time or retry-after time is reached. -func (s *githubRunnerScaler) waitForRateLimitReset(ctx context.Context) error { - if s.rateLimits.Remaining == 0 && !s.rateLimits.ResetTime.IsZero() && time.Now().Before(s.rateLimits.ResetTime) { - reset := time.Until(s.rateLimits.ResetTime) - s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, resets at %s, waiting for %s", s.rateLimits.ResetTime, reset)) - - select { - case <-ctx.Done(): - return ctx.Err() // Return if the context is canceled - case <-time.After(reset): - } +func (s *githubRunnerScaler) shouldWaitForRateLimit() (bool, time.Duration) { + if s.rateLimit.Remaining == 0 && !s.rateLimit.ResetTime.IsZero() && time.Now().Before(s.rateLimit.ResetTime) { + reset := time.Until(s.rateLimit.ResetTime) + s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, resets at %s, waiting for %s", s.rateLimit.ResetTime, reset)) + return true, reset } - if !s.rateLimits.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimits.RetryAfterTime) { - retry := time.Until(s.rateLimits.RetryAfterTime) - s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, retry after %s, waiting for %s", s.rateLimits.ResetTime, retry)) - - select { - case <-ctx.Done(): - return ctx.Err() // Return if the context 
is canceled - case <-time.After(retry): - } + if !s.rateLimit.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimit.RetryAfterTime) { + retry := time.Until(s.rateLimit.RetryAfterTime) + s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, retry after %s, waiting for %s", s.rateLimit.RetryAfterTime, retry)) + return true, retry } - return nil + return false, 0 } func (s *githubRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { if s.metadata.enableBackoff { - if err := s.waitForRateLimitReset(ctx); err != nil { - return nil, false, err + wait, waitDuration := s.shouldWaitForRateLimit() + if wait { + select { + case <-ctx.Done(): + return nil, false, ctx.Err() + case <-time.After(waitDuration): + // Proceed after wait + } } } diff --git a/pkg/scalers/github_runner_scaler_test.go b/pkg/scalers/github_runner_scaler_test.go index 30180e92088..76d6197a693 100644 --- a/pkg/scalers/github_runner_scaler_test.go +++ b/pkg/scalers/github_runner_scaler_test.go @@ -501,6 +501,48 @@ func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_WithNotModified(t *testing } } +func TestNewGitHubRunnerScaler_ShouldWait_ResetTime(t *testing.T) { + mockGitHubRunnerScaler := githubRunnerScaler{ + rateLimit: RateLimit{ + Remaining: 0, + ResetTime: time.Now().Add(15 * time.Second), + RetryAfterTime: time.Now(), + }, + } + + wait, waitDuration := mockGitHubRunnerScaler.shouldWaitForRateLimit() + + if !wait { + t.Fail() + } + + expectedWait := 15 * time.Second + if waitDuration < expectedWait-1*time.Second || waitDuration > expectedWait+1*time.Second { + t.Fail() + } +} + +func TestNewGitHubRunnerScaler_ShouldWait_RetryAfterTime(t *testing.T) { + mockGitHubRunnerScaler := githubRunnerScaler{ + rateLimit: RateLimit{ + Remaining: 0, + ResetTime: time.Now(), + RetryAfterTime: time.Now().Add(15 * time.Second), + }, + } + + wait, waitDuration := mockGitHubRunnerScaler.shouldWaitForRateLimit() + + if !wait { + t.Fail() + 
} + + expectedWait := 15 * time.Second + if waitDuration < expectedWait-1*time.Second || waitDuration > expectedWait+1*time.Second { + t.Fail() + } +} + func TestNewGitHubRunnerScaler_404(t *testing.T) { var apiStub = apiStubHandler404() From 221030917a4a49faddc4d8f6e7f16bce6c07a119 Mon Sep 17 00:00:00 2001 From: andrewhibbert Date: Mon, 24 Mar 2025 15:03:23 +0000 Subject: [PATCH 07/10] feat: Add support to enable backoff when rate limited by the Github API Signed-off-by: andrewhibbert --- pkg/scalers/github_runner_scaler.go | 20 ++++++++++++------- .../github_runner/github_runner_test.go | 1 + 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/pkg/scalers/github_runner_scaler.go b/pkg/scalers/github_runner_scaler.go index 19553ea7a2e..7c40b6372a1 100644 --- a/pkg/scalers/github_runner_scaler.go +++ b/pkg/scalers/github_runner_scaler.go @@ -596,15 +596,22 @@ func (s *githubRunnerScaler) getRepositories(ctx context.Context) ([]string, err } func (s *githubRunnerScaler) getRateLimit(header http.Header) RateLimit { - retryAfterTime := time.Now() + var retryAfterTime time.Time remaining, _ := strconv.Atoi(header.Get("X-RateLimit-Remaining")) reset, _ := strconv.ParseInt(header.Get("X-RateLimit-Reset"), 10, 64) resetTime := time.Unix(reset, 0) - if header.Get("retry-after") != "" { - retryAfter, _ := strconv.Atoi(header.Get("retry-after")) - retryAfterTime = time.Now().Add(time.Duration(retryAfter) * time.Second) + if retryAfterStr := header.Get("Retry-After"); retryAfterStr != "" { + if retrySeconds, err := strconv.Atoi(retryAfterStr); err == nil { + retryAfterTime = time.Now().Add(time.Duration(retrySeconds) * time.Second) + } + } + + if retryAfterTime.IsZero() { + s.logger.V(1).Info(fmt.Sprintf("Github API rate limit: Remaining: %d, ResetTime: %s", remaining, resetTime)) + } else { + s.logger.V(1).Info(fmt.Sprintf("Github API rate limit: Remaining: %d, ResetTime: %s, Retry-After: %s", remaining, resetTime, retryAfterTime)) } return RateLimit{ @@ -646,7 
+653,6 @@ func (s *githubRunnerScaler) getGithubRequest(ctx context.Context, url string, m if r.Header.Get("X-RateLimit-Remaining") != "" { s.rateLimit = s.getRateLimit(r.Header) - s.logger.V(0).Info(fmt.Sprintf("GitHub API rate limits: remaining %d, retry after %s, reset time %s", s.rateLimit.Remaining, s.rateLimit.RetryAfterTime, s.rateLimit.ResetTime)) } if r.StatusCode != 200 { @@ -822,13 +828,13 @@ func (s *githubRunnerScaler) GetWorkflowQueueLength(ctx context.Context) (int64, func (s *githubRunnerScaler) shouldWaitForRateLimit() (bool, time.Duration) { if s.rateLimit.Remaining == 0 && !s.rateLimit.ResetTime.IsZero() && time.Now().Before(s.rateLimit.ResetTime) { reset := time.Until(s.rateLimit.ResetTime) - s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, resets at %s, waiting for %s", s.rateLimit.ResetTime, reset)) + s.logger.V(1).Info(fmt.Sprintf("Rate limit exceeded, resets at %s, waiting for %s", s.rateLimit.ResetTime, reset)) return true, reset } if !s.rateLimit.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimit.RetryAfterTime) { retry := time.Until(s.rateLimit.RetryAfterTime) - s.logger.V(0).Info(fmt.Sprintf("Rate limit exceeded, retry after %s, waiting for %s", s.rateLimit.RetryAfterTime, retry)) + s.logger.V(1).Info(fmt.Sprintf("Rate limit exceeded, retry after %s, waiting for %s", s.rateLimit.RetryAfterTime, retry)) return true, retry } diff --git a/tests/scalers/github_runner/github_runner_test.go b/tests/scalers/github_runner/github_runner_test.go index 6cabc243102..66aba34a766 100644 --- a/tests/scalers/github_runner/github_runner_test.go +++ b/tests/scalers/github_runner/github_runner_test.go @@ -219,6 +219,7 @@ spec: labels: {{.Labels}} runnerScopeFromEnv: "RUNNER_SCOPE" enableEtags: "true" + enableBackoff: "true" authenticationRef: name: github-trigger-auth ` From c61bdf167aa2ab866f846b699d6b9b6fb039c516 Mon Sep 17 00:00:00 2001 From: andrewhibbert Date: Mon, 24 Mar 2025 16:36:27 +0000 Subject: [PATCH 08/10] feat: Add support to enable 
backoff when rate limited by the Github API Signed-off-by: andrewhibbert --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3e6eecb161e..0fcf34f4a94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ Here is an overview of all new **experimental** features: - **Elasticsearch Scaler**: Support IgnoreNullValues at Elasticsearch scaler ([#6599](https://github.com/kedacore/keda/pull/6599)) - **GitHub Scaler**: Add support to use ETag for conditional requests against the Github API ([#6503](https://github.com/kedacore/keda/issues/6503)) - **GitHub Scaler**: Filter workflows via query parameter for improved queue count accuracy ([#6519](https://github.com/kedacore/keda/pull/6519)) +- **GitHub Scaler**: Implement backoff when receiving rate limit errors ([#6643](https://github.com/kedacore/keda/issues/6643)) - **IBMMQ Scaler**: Handling StatusNotFound in IBMMQ scaler ([#6472](https://github.com/kedacore/keda/pull/6472)) - **RabbitMQ Scaler**: Support use of the ‘vhostName’ parameter in the ‘TriggerAuthentication’ resource ([#6369](https://github.com/kedacore/keda/issues/6369)) - **Selenium Grid**: Add trigger param to set custom capabilities for matching specific Nodes ([#6536](https://github.com/kedacore/keda/issues/6536)) From 22112c2ed33f2a19ceb250ef976b878e1b689b61 Mon Sep 17 00:00:00 2001 From: andrewhibbert Date: Fri, 29 Aug 2025 15:06:30 +0100 Subject: [PATCH 09/10] feat: Add support to enable backoff when rate limited by the Github API Signed-off-by: andrewhibbert --- pkg/scalers/github_runner_scaler.go | 58 +++++++----------- pkg/scalers/github_runner_scaler_test.go | 77 +++++++++++------------- 2 files changed, 57 insertions(+), 78 deletions(-) diff --git a/pkg/scalers/github_runner_scaler.go b/pkg/scalers/github_runner_scaler.go index e0aacc76ae5..0fd56ec0b73 100644 ---
a/pkg/scalers/github_runner_scaler.go +++ b/pkg/scalers/github_runner_scaler.go @@ -29,15 +29,17 @@ const ( var reservedLabels = []string{"self-hosted", "linux", "x64"} type githubRunnerScaler struct { - metricType v2.MetricTargetType - metadata *githubRunnerMetadata - httpClient *http.Client - logger logr.Logger - etags map[string]string - previousRepos []string - previousWfrs map[string]map[string]*WorkflowRuns - previousJobs map[string][]Job - rateLimit RateLimit + metricType v2.MetricTargetType + metadata *githubRunnerMetadata + httpClient *http.Client + logger logr.Logger + etags map[string]string + previousRepos []string + previousWfrs map[string]map[string]*WorkflowRuns + previousJobs map[string][]Job + rateLimit RateLimit + previousQueueLength int64 + previousQueueLengthTime time.Time } type githubRunnerMetadata struct { @@ -660,6 +662,15 @@ func canRunnerMatchLabels(jobLabels []string, runnerLabels []string, noDefaultLa // GetWorkflowQueueLength returns the number of workflow jobs in the queue func (s *githubRunnerScaler) GetWorkflowQueueLength(ctx context.Context) (int64, error) { + if s.metadata.EnableBackoff && s.rateLimit.Remaining == 0 && !s.rateLimit.ResetTime.IsZero() && time.Now().Before(s.rateLimit.ResetTime) { + s.logger.V(1).Info(fmt.Sprintf("Rate limit exceeded, resets at %s, returning previous queue length %d, last successful queue length check %s", s.rateLimit.ResetTime, s.previousQueueLength, s.previousQueueLengthTime)) + return s.previousQueueLength, nil + } + if s.metadata.EnableBackoff && !s.rateLimit.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimit.RetryAfterTime) { + s.logger.V(1).Info(fmt.Sprintf("Rate limit exceeded, retry after %s, returning previous queue length %d, last successful queue length check %s", s.rateLimit.RetryAfterTime, s.previousQueueLength, s.previousQueueLengthTime)) + return s.previousQueueLength, nil + } + var repos []string var err error @@ -702,37 +713,12 @@ func (s *githubRunnerScaler) 
GetWorkflowQueueLength(ctx context.Context) (int64, } } + s.previousQueueLength = queueCount + s.previousQueueLengthTime = time.Now() return queueCount, nil } -func (s *githubRunnerScaler) shouldWaitForRateLimit() (bool, time.Duration) { - if s.rateLimit.Remaining == 0 && !s.rateLimit.ResetTime.IsZero() && time.Now().Before(s.rateLimit.ResetTime) { - reset := time.Until(s.rateLimit.ResetTime) - s.logger.V(1).Info(fmt.Sprintf("Rate limit exceeded, resets at %s, waiting for %s", s.rateLimit.ResetTime, reset)) - return true, reset - } - - if !s.rateLimit.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimit.RetryAfterTime) { - retry := time.Until(s.rateLimit.RetryAfterTime) - s.logger.V(1).Info(fmt.Sprintf("Rate limit exceeded, retry after %s, waiting for %s", s.rateLimit.RetryAfterTime, retry)) - return true, retry - } - - return false, 0 -} - func (s *githubRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { - if s.metadata.EnableBackoff { - wait, waitDuration := s.shouldWaitForRateLimit() - if wait { - select { - case <-ctx.Done(): - return nil, false, ctx.Err() - case <-time.After(waitDuration): - // Proceed after wait - } - } - } queueLen, err := s.GetWorkflowQueueLength(ctx) if err != nil { diff --git a/pkg/scalers/github_runner_scaler_test.go b/pkg/scalers/github_runner_scaler_test.go index 35b011ccc1c..77e2e401828 100644 --- a/pkg/scalers/github_runner_scaler_test.go +++ b/pkg/scalers/github_runner_scaler_test.go @@ -503,48 +503,6 @@ func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_WithNotModified(t *testing } } -func TestNewGitHubRunnerScaler_ShouldWait_ResetTime(t *testing.T) { - mockGitHubRunnerScaler := githubRunnerScaler{ - rateLimit: RateLimit{ - Remaining: 0, - ResetTime: time.Now().Add(15 * time.Second), - RetryAfterTime: time.Now(), - }, - } - - wait, waitDuration := mockGitHubRunnerScaler.shouldWaitForRateLimit() - - if !wait { - t.Fail() - } - - expectedWait := 
15 * time.Second - if waitDuration < expectedWait-1*time.Second || waitDuration > expectedWait+1*time.Second { - t.Fail() - } -} - -func TestNewGitHubRunnerScaler_ShouldWait_RetryAfterTime(t *testing.T) { - mockGitHubRunnerScaler := githubRunnerScaler{ - rateLimit: RateLimit{ - Remaining: 0, - ResetTime: time.Now(), - RetryAfterTime: time.Now().Add(15 * time.Second), - }, - } - - wait, waitDuration := mockGitHubRunnerScaler.shouldWaitForRateLimit() - - if !wait { - t.Fail() - } - - expectedWait := 15 * time.Second - if waitDuration < expectedWait-1*time.Second || waitDuration > expectedWait+1*time.Second { - t.Fail() - } -} - func TestNewGitHubRunnerScaler_404(t *testing.T) { var apiStub = apiStubHandler404() @@ -835,6 +793,41 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledRepos_NoRate(t *testi } } +func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_WithRateLimitBackoff(t *testing.T) { + // First call will set previous queue length + apiStub := apiStubHandler(true, false) + meta := getGitHubTestMetaData(apiStub.URL) + meta.EnableBackoff = true + + scaler := githubRunnerScaler{ + metadata: meta, + httpClient: http.DefaultClient, + } + scaler.metadata.Repos = []string{"test"} + scaler.metadata.Labels = []string{"foo", "bar"} + + if queueLen, err := scaler.GetWorkflowQueueLength(context.Background()); err != nil { + fmt.Println(err) + t.Fail() + } else if queueLen != 1 { + fmt.Printf("Expected queue length of 1 got %d\n", queueLen) + t.Fail() + } + + // Second call simulating that there is a rate limit + // should return previous queue length + scaler.rateLimit.Remaining = 0 + scaler.rateLimit.ResetTime = time.Now().Add(5 * time.Minute) + + if queueLen, err := scaler.GetWorkflowQueueLength(context.Background()); err != nil { + fmt.Println(err) + t.Fail() + } else if queueLen != 1 { + fmt.Printf("Expected queue length of 1 after rate limit backoff got %d\n", queueLen) + t.Fail() + } +} + type githubRunnerMetricIdentifier struct { metadataTestData 
*map[string]string triggerIndex int From 32b33e06f7beff46250439b10388a6dbe7a35937 Mon Sep 17 00:00:00 2001 From: andrewhibbert Date: Fri, 29 Aug 2025 19:14:59 +0100 Subject: [PATCH 10/10] feat: Add support to enable backoff when rate limited by the Github API Signed-off-by: andrewhibbert --- pkg/scalers/github_runner_scaler.go | 52 +++++++++++++++--------- pkg/scalers/github_runner_scaler_test.go | 1 - 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/pkg/scalers/github_runner_scaler.go b/pkg/scalers/github_runner_scaler.go index 0fd56ec0b73..d656d5cf308 100644 --- a/pkg/scalers/github_runner_scaler.go +++ b/pkg/scalers/github_runner_scaler.go @@ -369,17 +369,21 @@ func NewGitHubRunnerScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { previousJobs := make(map[string][]Job) previousWfrs := make(map[string]map[string]*WorkflowRuns) rateLimit := RateLimit{} + previousQueueLength := int64(0) + previousQueueLengthTime := time.Time{} return &githubRunnerScaler{ - metricType: metricType, - metadata: meta, - httpClient: httpClient, - logger: InitializeLogger(config, "github_runner_scaler"), - etags: etags, - previousRepos: previousRepos, - previousJobs: previousJobs, - previousWfrs: previousWfrs, - rateLimit: rateLimit, + metricType: metricType, + metadata: meta, + httpClient: httpClient, + logger: InitializeLogger(config, "github_runner_scaler"), + etags: etags, + previousRepos: previousRepos, + previousJobs: previousJobs, + previousWfrs: previousWfrs, + rateLimit: rateLimit, + previousQueueLength: previousQueueLength, + previousQueueLengthTime: previousQueueLengthTime, }, nil } @@ -662,13 +666,21 @@ func canRunnerMatchLabels(jobLabels []string, runnerLabels []string, noDefaultLa // GetWorkflowQueueLength returns the number of workflow jobs in the queue func (s *githubRunnerScaler) GetWorkflowQueueLength(ctx context.Context) (int64, error) { - if s.metadata.EnableBackoff && s.rateLimit.Remaining == 0 && !s.rateLimit.ResetTime.IsZero() && 
time.Now().Before(s.rateLimit.ResetTime) { - s.logger.V(1).Info(fmt.Sprintf("Rate limit exceeded, resets at %s, returning previous queue length %d, last successful queue length check %s", s.rateLimit.ResetTime, s.previousQueueLength, s.previousQueueLengthTime)) - return s.previousQueueLength, nil - } - if s.metadata.EnableBackoff && !s.rateLimit.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimit.RetryAfterTime) { - s.logger.V(1).Info(fmt.Sprintf("Rate limit exceeded, retry after %s, returning previous queue length %d, last successful queue length check %s", s.rateLimit.RetryAfterTime, s.previousQueueLength, s.previousQueueLengthTime)) - return s.previousQueueLength, nil + if s.metadata.EnableBackoff { + var backoffUntilTime time.Time + if s.rateLimit.Remaining == 0 && !s.rateLimit.ResetTime.IsZero() && time.Now().Before(s.rateLimit.ResetTime) { + backoffUntilTime = s.rateLimit.ResetTime + } else if !s.rateLimit.RetryAfterTime.IsZero() && time.Now().Before(s.rateLimit.RetryAfterTime) { + backoffUntilTime = s.rateLimit.RetryAfterTime + } + if !backoffUntilTime.IsZero() { + if !s.previousQueueLengthTime.IsZero() { + s.logger.V(1).Info(fmt.Sprintf("Github API rate limit exceeded, returning previous queue length %d, last successful queue length check %s, backing off until %s", s.previousQueueLength, s.previousQueueLengthTime, backoffUntilTime)) + return s.previousQueueLength, nil + } + + return -1, fmt.Errorf("github API rate limit exceeded, no valid previous queue length available, API available at %s", backoffUntilTime) + } } var repos []string @@ -713,13 +725,15 @@ func (s *githubRunnerScaler) GetWorkflowQueueLength(ctx context.Context) (int64, } } - s.previousQueueLength = queueCount - s.previousQueueLengthTime = time.Now() + if s.metadata.EnableBackoff { + s.previousQueueLength = queueCount + s.previousQueueLengthTime = time.Now() + } + return queueCount, nil } func (s *githubRunnerScaler) GetMetricsAndActivity(ctx context.Context, metricName string) 
([]external_metrics.ExternalMetricValue, bool, error) { - queueLen, err := s.GetWorkflowQueueLength(ctx) if err != nil { s.logger.Error(err, "error getting workflow queue length") diff --git a/pkg/scalers/github_runner_scaler_test.go b/pkg/scalers/github_runner_scaler_test.go index 77e2e401828..e4cc6d2edb8 100644 --- a/pkg/scalers/github_runner_scaler_test.go +++ b/pkg/scalers/github_runner_scaler_test.go @@ -196,7 +196,6 @@ func apiStubHandlerCustomJob(hasRateLeft bool, exceeds30Repos bool, jobResponse } else { w.Header().Set("X-RateLimit-Remaining", "0") w.WriteHeader(http.StatusForbidden) - return } if strings.HasSuffix(r.URL.String(), "jobs?per_page=100") { // nosemgrep: no-direct-write-to-responsewriter