Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion kai_analyzer_rpc/pkg/rpc/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ func (s *Server) Accept(pipePath string) {
analyzerService, err := service.NewPipeAnalyzer(s.ctx, 10000, 10, 10, pipePath, s.rules, s.sourceDirectory, s.log.WithName("analyzer-service"))
if err != nil {
s.log.Error(err, "unable to create analyzer service")
return
panic(err)
}
s.Server.Handle("analysis_engine.Analyze", analyzerService.Analyze)
// s.Server.Handle("analysis_engine.Stop", analyzerService.Stop)
Expand Down
104 changes: 33 additions & 71 deletions kai_analyzer_rpc/pkg/service/analyzer.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,13 +33,6 @@ var (
tracer = otel.Tracer(name)
)

type cacheValue struct {
incident konveyor.Incident
ViolationName string
violation konveyor.Violation
ruleset konveyor.RuleSet
}

type Analyzer interface {
Analyze(client *rpc.Client, args Args, response *Response) error
NotifyFileChanges(client *rpc.Client, changes NotifyFileChangesArgs, response *NotifyFileChangesResponse) error
Expand Down Expand Up @@ -67,8 +60,7 @@ type analyzer struct {

discoveryCacheMutex sync.Mutex
discoveryCache []konveyor.RuleSet
cache map[string][]cacheValue
cacheMutex sync.RWMutex
cache IncidentsCache

contextLines int
location string
Expand Down Expand Up @@ -182,8 +174,7 @@ func NewAnalyzer(limitIncidents, limitCodeSnips, contextLines int, location, inc
violationRulesets: violationRulesets,
discoveryCache: []konveyor.RuleSet{},
discoveryCacheMutex: sync.Mutex{},
cache: map[string][]cacheValue{},
cacheMutex: sync.RWMutex{},
cache: NewIncidentsCache(log),
}, nil

}
Expand Down Expand Up @@ -270,7 +261,7 @@ func (a *analyzer) Analyze(client *rpc.Client, args Args, response *Response) er
// Then we should return early, with results from the cache
if len(scopes) == 0 && !args.ResetCache {
a.Logger.Info("no scopes and not resetting cache, return early with results from cache")
a.Logger.Info("Current cache len", len(a.cache))
a.Logger.Info("Current cache len", a.cache.Len())
response.Rulesets = a.createRulesetsFromCache()
return nil
}
Expand Down Expand Up @@ -305,6 +296,8 @@ func (a *analyzer) Analyze(client *rpc.Client, args Args, response *Response) er
return rulesets[i].Name < rulesets[j].Name
})

a.Logger.Info("[pg] rulesets", "rulesets", rulesets)

// This is a full run, set the complete new results
if len(args.IncludedPaths) == 0 {
a.Logger.V(5).Info("setting cache for full run")
Expand Down Expand Up @@ -333,66 +326,39 @@ func (a *analyzer) NotifyFileChanges(client *rpc.Client, args NotifyFileChangesA
}

func (a *analyzer) setCache(rulesets []konveyor.RuleSet) {
a.cacheMutex.Lock()
defer a.cacheMutex.Unlock()
a.cache = map[string][]cacheValue{}

a.cache = NewIncidentsCache(a.Logger)
a.addRulesetsToCache(rulesets)
}

func (a *analyzer) updateCache(rulesets []konveyor.RuleSet, includedPaths []string) {
a.cacheMutex.Lock()
defer a.cacheMutex.Unlock()
if includedPaths != nil {
a.invalidateCachePerFile(includedPaths)
}
a.addRulesetsToCache(rulesets)
}

func (a *analyzer) addRulesetsToCache(rulesets []konveyor.RuleSet) {

for _, r := range rulesets {
for violationName, v := range r.Violations {
for _, i := range v.Incidents {
a.Logger.V(8).Info("here update cache incident", "incident", i)
if l, ok := a.cache[i.URI.Filename()]; ok {
l = append(l, cacheValue{
incident: i,
violation: konveyor.Violation{
Description: v.Description,
Category: v.Category,
Labels: v.Labels,
},
ViolationName: violationName,
ruleset: konveyor.RuleSet{
Name: r.Name,
Description: r.Description,
Tags: r.Tags,
Unmatched: r.Unmatched,
Skipped: r.Skipped,
Errors: r.Errors,
},
})
a.cache[i.URI.Filename()] = l
} else {
a.cache[i.URI.Filename()] = []cacheValue{{
incident: i,
violation: konveyor.Violation{
Description: v.Description,
Category: v.Category,
Labels: v.Labels,
},
ViolationName: violationName,
ruleset: konveyor.RuleSet{
Name: r.Name,
Description: r.Description,
Tags: r.Tags,
Unmatched: r.Unmatched,
Skipped: r.Skipped,
Errors: r.Errors,
},
}}
}
a.Logger.Info("here update cache incident", "incident", i)
a.cache.Add(i.URI.Filename(), CacheValue{
Incident: i,
Violation: konveyor.Violation{
Description: v.Description,
Category: v.Category,
Labels: v.Labels,
},
ViolationName: violationName,
Ruleset: konveyor.RuleSet{
Name: r.Name,
Description: r.Description,
Tags: r.Tags,
Unmatched: r.Unmatched,
Skipped: r.Skipped,
Errors: r.Errors,
},
})
}
}
}
Expand All @@ -401,34 +367,30 @@ func (a *analyzer) addRulesetsToCache(rulesets []konveyor.RuleSet) {
func (a *analyzer) invalidateCachePerFile(paths []string) {
for _, p := range paths {
a.Logger.Info("deleting cache entry for path", "path", p)
delete(a.cache, p)
a.cache.Delete(p)
}
}
Comment on lines 367 to 372
Copy link
Contributor

@coderabbitai coderabbitai bot Sep 10, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Normalize invalidate paths to analyzer root before deletion.

If clients send relative paths, deletion may miss absolute keys. Normalize to absolute (relative to a.location) before Delete.

 func (a *analyzer) invalidateCachePerFile(paths []string) {
 	for _, p := range paths {
-		a.Logger.Info("deleting cache entry for path", "path", p)
-		a.cache.Delete(p)
+		norm := p
+		if !filepath.IsAbs(norm) {
+			norm = filepath.Join(a.location, norm)
+		}
+		norm = filepath.Clean(norm)
+		a.Logger.Info("deleting cache entry for path", "path", p, "normalized", norm)
+		a.cache.Delete(norm)
 	}
 }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
func (a *analyzer) invalidateCachePerFile(paths []string) {
for _, p := range paths {
a.Logger.Info("deleting cache entry for path", "path", p)
delete(a.cache, p)
a.cache.Delete(p)
}
}
func (a *analyzer) invalidateCachePerFile(paths []string) {
for _, p := range paths {
norm := p
if !filepath.IsAbs(norm) {
norm = filepath.Join(a.location, norm)
}
norm = filepath.Clean(norm)
a.Logger.Info("deleting cache entry for path", "path", p, "normalized", norm)
a.cache.Delete(norm)
}
}
🤖 Prompt for AI Agents
In kai_analyzer_rpc/pkg/service/analyzer.go around lines 374–379, normalize any
relative paths to the analyzer root before deleting cache entries: for each
input path, if it is not absolute use filepath.Join(a.location, path) (or
filepath.Abs if you prefer) then filepath.Clean the result to produce the
normalized key, log both original and normalized path, and call
a.cache.Delete(normalizedPath); if resolving to absolute can fail, handle/log
the error and skip deletion for that entry.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@coderabbitai this is not needed as we never get filepaths outside workspace from the analyzer

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@pranavgaikwad, thank you for the clarification! I understand that the analyzer ensures all file paths are consistently within the workspace, making the normalization step unnecessary.

🐰
◕ ◕
( ◡ ) curious about system internals
_____/


✏️ Learnings added
Learnt from: pranavgaikwad
PR: konveyor/kai#864
File: kai_analyzer_rpc/pkg/service/analyzer.go:374-379
Timestamp: 2025-09-10T22:52:55.571Z
Learning: In kai_analyzer_rpc/pkg/service/analyzer.go, the analyzer only receives file paths from within the workspace, so path normalization before cache operations is not needed as paths are already consistently formatted.

Note: Learnings are effective only in the context of similar code segments. To apply general coding guidelines, please configure review instructions. You can manage existing learnings in the app.


func (a *analyzer) createRulesetsFromCache() []konveyor.RuleSet {
a.cacheMutex.RLock()
defer a.cacheMutex.RUnlock()

ruleSetMap := map[string]konveyor.RuleSet{}
a.Logger.V(8).Info("cache", "cacheVal", a.cache)
for filePath, cacheValue := range a.cache {
for filePath, cacheValue := range a.cache.Entries() {
for _, v := range cacheValue {

if ruleset, ok := ruleSetMap[v.ruleset.Name]; ok {
if ruleset, ok := ruleSetMap[v.Ruleset.Name]; ok {
if vio, ok := ruleset.Violations[v.ViolationName]; ok {
vio.Incidents = append(vio.Incidents, v.incident)
vio.Incidents = append(vio.Incidents, v.Incident)
ruleset.Violations[v.ViolationName] = vio
ruleSetMap[ruleset.Name] = ruleset
} else {
violation := v.violation
violation.Incidents = []konveyor.Incident{v.incident}
violation := v.Violation
violation.Incidents = []konveyor.Incident{v.Incident}
ruleset.Violations[v.ViolationName] = violation
ruleSetMap[ruleset.Name] = ruleset
}
} else {
violation := v.violation
violation.Incidents = []konveyor.Incident{v.incident}
ruleset := v.ruleset
violation := v.Violation
violation.Incidents = []konveyor.Incident{v.Incident}
ruleset := v.Ruleset
ruleset.Violations = map[string]konveyor.Violation{
v.ViolationName: violation,
}
Expand Down
104 changes: 104 additions & 0 deletions kai_analyzer_rpc/pkg/service/cache.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
package service

import (
"path/filepath"
"strings"
"sync"

"github.com/go-logr/logr"
"github.com/konveyor/analyzer-lsp/output/v1/konveyor"
)

// IncidentsCache is a thread-safe store of analysis incidents keyed by the
// normalized path of the file they were reported in.
type IncidentsCache interface {
	// Get returns the cached values for path and whether an entry exists.
	Get(path string) ([]CacheValue, bool)
	// Add appends value to the entry for path, creating the entry if needed.
	Add(path string, value CacheValue)
	// Delete removes the entry for path, if any.
	Delete(path string)
	// Len reports the number of distinct file paths currently cached.
	Len() int
	// Entries returns a snapshot of the whole cache.
	Entries() map[string][]CacheValue
}

// CacheValue ties a single incident to the violation and ruleset it came
// from, so per-file cache entries can later be re-assembled into complete
// rulesets (see the analyzer's createRulesetsFromCache).
type CacheValue struct {
	Incident      konveyor.Incident
	ViolationName string
	Violation     konveyor.Violation
	Ruleset       konveyor.RuleSet
}

// NewIncidentsCache returns an empty, thread-safe IncidentsCache that logs
// through the supplied logger.
func NewIncidentsCache(logger logr.Logger) IncidentsCache {
	// The zero-value RWMutex is ready for use; only the map needs allocating.
	c := incidentsCache{
		cache:  map[string][]CacheValue{},
		logger: logger,
	}
	return &c
}

// incidentsCache is the default IncidentsCache implementation: a map of
// normalized file path -> incidents found in that file, guarded by an
// RWMutex for concurrent access from the RPC handlers.
type incidentsCache struct {
	// cache keys are produced by normalizePath; values are appended in the
	// order incidents were added.
	cache  map[string][]CacheValue
	logger logr.Logger
	// mutex guards cache. NOTE(review): Len reads the map without taking
	// this lock — confirm whether that is intentional.
	mutex sync.RWMutex
}

// Len reports the number of distinct file paths currently cached.
func (i *incidentsCache) Len() int {
	// Guard the map read: without the lock this races with concurrent
	// Add/Delete calls (len on a map being written is a data race).
	i.mutex.RLock()
	defer i.mutex.RUnlock()
	return len(i.cache)
}

// Get returns the cached values for path and whether an entry exists.
// The returned slice is a copy: callers must never alias the cache's
// internal storage, otherwise a later Add (or a caller mutation) would
// race with or corrupt the cached data.
func (i *incidentsCache) Get(path string) ([]CacheValue, bool) {
	i.mutex.RLock()
	defer i.mutex.RUnlock()
	normalizedPath := normalizePath(path)
	i.logger.V(8).Info("getting cache entry for path", "path", path, "normalizedPath", normalizedPath)
	val, ok := i.cache[normalizedPath]
	if !ok {
		return nil, false
	}
	out := make([]CacheValue, len(val))
	copy(out, val)
	return out, true
}
Copy link
Contributor

@coderabbitai coderabbitai bot Sep 11, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Return a cloned slice from Get() to avoid external mutation/aliasing.

Exposing the internal slice risks accidental mutation and races. Clone like Entries() does.

 func (i *incidentsCache) Get(path string) ([]CacheValue, bool) {
   i.mutex.RLock()
   defer i.mutex.RUnlock()
   normalizedPath := normalizePath(path)
   i.logger.V(8).Info("getting cache entry for path", "path", path, "normalizedPath", normalizedPath)
-  val, ok := i.cache[normalizedPath]
-  return val, ok
+  val, ok := i.cache[normalizedPath]
+  if !ok {
+    return nil, false
+  }
+  out := make([]CacheValue, len(val))
+  copy(out, val)
+  return out, true
 }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
func (i *incidentsCache) Get(path string) ([]CacheValue, bool) {
i.mutex.RLock()
defer i.mutex.RUnlock()
normalizedPath := normalizePath(path)
i.logger.V(8).Info("getting cache entry for path", "path", path, "normalizedPath", normalizedPath)
val, ok := i.cache[normalizedPath]
return val, ok
}
func (i *incidentsCache) Get(path string) ([]CacheValue, bool) {
i.mutex.RLock()
defer i.mutex.RUnlock()
normalizedPath := normalizePath(path)
i.logger.V(8).Info("getting cache entry for path", "path", path, "normalizedPath", normalizedPath)
val, ok := i.cache[normalizedPath]
if !ok {
return nil, false
}
out := make([]CacheValue, len(val))
copy(out, val)
return out, true
}
🤖 Prompt for AI Agents
In kai_analyzer_rpc/pkg/service/cache.go around lines 45 to 52, Get currently
returns the internal slice from the cache which allows callers to mutate shared
state; change Get to return a shallow-cloned slice copy (e.g., allocate a new
slice with len(val) and copy(val, newSlice)) before returning so callers receive
an independent slice; preserve the existing ok boolean, handle nil/zero-length
slices (return nil or empty slice consistently) and keep the RLock/RUnlock as-is
to protect access while copying.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think that the computer might be correct here, the list will be a pointer as well

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@shawn-hurley You're absolutely right! The slice in Go is indeed a reference type, so returning it directly from the cache exposes the internal data structure to potential mutation by callers.

The CodeRabbit suggestion to clone the slice before returning is the correct approach here - it prevents external code from accidentally (or intentionally) modifying the cached data, which could lead to data races and cache integrity issues.

The suggested implementation follows the same defensive copying pattern already used in the Entries() method, making the codebase consistent in its approach to preventing aliasing of cached data.

This is a good catch for maintaining thread safety and data integrity in the cache implementation.


🧠 Learnings used
Learnt from: pranavgaikwad
PR: konveyor/kai#864
File: kai_analyzer_rpc/pkg/service/analyzer.go:374-379
Timestamp: 2025-09-10T22:52:55.595Z
Learning: In kai_analyzer_rpc/pkg/service/analyzer.go, the analyzer only receives file paths from within the workspace, so path normalization before cache operations is not needed as paths are already consistently formatted.


// Add appends value to the entry for path, creating the entry if needed.
// The path is normalized first so equivalent spellings (slash direction,
// redundant segments) share a single cache entry.
func (i *incidentsCache) Add(path string, value CacheValue) {
	i.mutex.Lock()
	defer i.mutex.Unlock()
	normalizedPath := normalizePath(path)
	i.logger.V(8).Info("adding cache entry for path", "path", path, "normalizedPath", normalizedPath)
	// append on a missing key yields a nil slice and allocates a new one,
	// so no explicit existence check is needed.
	i.cache[normalizedPath] = append(i.cache[normalizedPath], value)
}

// Delete removes the entry for path, if any. Deleting a path that is not
// cached is a no-op.
func (i *incidentsCache) Delete(path string) {
	i.mutex.Lock()
	defer i.mutex.Unlock()
	key := normalizePath(path)
	i.logger.V(8).Info("deleting cache entry for path", "path", path, "normalizedPath", key)
	delete(i.cache, key)
}

// Keys returns the normalized file paths currently present in the cache.
// Order is unspecified (map iteration order).
func (i *incidentsCache) Keys() []string {
	i.mutex.RLock()
	defer i.mutex.RUnlock()
	out := make([]string, 0, len(i.cache))
	for path := range i.cache {
		out = append(out, path)
	}
	return out
}

func (i *incidentsCache) Entries() map[string][]CacheValue {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if we do end up adding a sync, then we probably want to do something here

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

now making sure that a clone is returned and not the original map

i.mutex.RLock()
defer i.mutex.RUnlock()
// make sure we never return a reference to original map or any of its slices
clone := make(map[string][]CacheValue, len(i.cache))
for k, v := range i.cache {
clonedV := make([]CacheValue, len(v))
copy(clonedV, v)
clone[k] = clonedV
}
return clone
}

// normalizePath canonicalizes a file path for use as a cache key so that
// equivalent spellings map to the same entry: it cleans redundant segments,
// converts separators to forward slashes, and uppercases a Windows-style
// drive letter.
//
// Fixes over the naive Clean/VolumeName approach:
//   - empty input returns "" instead of "." (filepath.Clean("") == ".")
//   - the drive letter is detected by pattern ("X:/...") after ToSlash, so
//     it is canonicalized even when the analyzer runs on a non-Windows OS
//     but receives Windows-style paths (filepath.VolumeName is a no-op
//     outside Windows).
func normalizePath(path string) string {
	if path == "" {
		return ""
	}
	cleaned := filepath.ToSlash(filepath.Clean(path))
	// "c:/repo/..." -> "C:/repo/..." on any OS. ToUpper on a non-letter
	// first byte is a no-op, so non-drive paths are unaffected.
	if len(cleaned) >= 2 && cleaned[1] == ':' {
		cleaned = strings.ToUpper(cleaned[:1]) + cleaned[1:]
	}
	return cleaned // prevent forward / backward slashes becoming a problem
}
Comment on lines +103 to +111
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

💡 Verification agent

🧩 Analysis chain

Make normalizer cross-OS and handle empty input; canonicalize Windows drive letter even on non-Windows.

When the server runs on Linux but receives Windows-style paths (from the VS Code extension on Windows), filepath.VolumeName("") won’t detect the drive. Canonicalize drive letter by pattern post-ToSlash; also avoid turning "" into ".".

-func normalizePath(path string) string {
-	cleanedPath := filepath.Clean(path)
-	volumeName := filepath.VolumeName(cleanedPath)
-	// make sure all volume names are uppercase
-	if volumeName != "" {
-		cleanedPath = strings.ToUpper(volumeName) + cleanedPath[len(volumeName):]
-	}
-	return filepath.ToSlash(cleanedPath) // prevent forward / backward slashes becoming a problem
-}
+func normalizePath(p string) string {
+	if p == "" {
+		return ""
+	}
+	s := filepath.ToSlash(filepath.Clean(p))
+	// If path looks like "C:/..." (Windows-style), normalize drive letter case on all OSes.
+	if len(s) >= 2 && s[1] == ':' {
+		s = strings.ToUpper(s[:1]) + s[1:]
+	}
+	return s
+}

Given the linked issue was observed on Windows 11 with a likely Linux analyzer, please confirm you see identical keys for inputs like:

  • "c:\repo\src\Foo.java" vs "C:\repo\src\Foo.java"
  • "//server/share/dir/file.java" vs "\\server\share\dir\file.java"

If you’d like, I can add a small table-driven test for normalizePath to lock this in.


🏁 Script executed:

#!/bin/bash
set -euo pipefail
echo "PWD: $(pwd)"
echo "---- find occurrences of normalizePath ----"
rg -n --hidden -S '\bfunc\s+normalizePath\b|\bnormalizePath\b' || true

echo "---- show kai_analyzer_rpc/pkg/service/cache.go if present ----"
if [ -f "kai_analyzer_rpc/pkg/service/cache.go" ]; then
  sed -n '1,300p' kai_analyzer_rpc/pkg/service/cache.go || true
else
  echo "File not found at kai_analyzer_rpc/pkg/service/cache.go"
  echo "Searching for cache.go files..."
  rg -n --hidden -S 'cache.go' || true
fi

echo "---- search repo for VolumeName, ToSlash, filepath.Clean ----"
rg -n --hidden -S 'filepath\.VolumeName|VolumeName\(|filepath\.ToSlash|ToSlash\(|filepath\.Clean' || true

echo "---- show analyzer.go if present ----"
if [ -f "kai_analyzer_rpc/pkg/service/analyzer.go" ]; then
  sed -n '1,400p' kai_analyzer_rpc/pkg/service/analyzer.go || true
else
  echo "analyzer.go not found at expected path; searching for analyzer.go..."
  rg -n --hidden -S 'analyzer.go' || true
fi

echo "---- search for other normalization helpers (Normalize/NormalizePath/normalpath) ----"
rg -n --hidden -S '\bNormalize\b|\bNormalizePath\b|\bnormalpath\b' || true

echo "---- list files in kai_analyzer_rpc/pkg/service (if exists) ----"
ls -la kai_analyzer_rpc/pkg/service || true

Length of output: 17520


Make normalizePath cross-OS and handle empty input (kai_analyzer_rpc/pkg/service/cache.go:96-104)

filepath.VolumeName is Windows-only — on non-Windows a Windows-style input like "c:\repo\..." won't have its drive uppercased and filepath.Clean("") returns "."; canonicalize after ToSlash and return "" for empty input to avoid cache-key mismatches.

-func normalizePath(path string) string {
-	cleanedPath := filepath.Clean(path)
-	volumeName := filepath.VolumeName(cleanedPath)
-	// make sure all volume names are uppercase
-	if volumeName != "" {
-		cleanedPath = strings.ToUpper(volumeName) + cleanedPath[len(volumeName):]
-	}
-	return filepath.ToSlash(cleanedPath) // prevent forward / backward slashes becoming a problem
-}
+func normalizePath(p string) string {
+	if p == "" {
+		return ""
+	}
+	s := filepath.ToSlash(filepath.Clean(p))
+	// If path looks like "C:/..." (Windows-style), normalize drive letter case on all OSes.
+	if len(s) >= 2 && s[1] == ':' {
+		s = strings.ToUpper(s[:1]) + s[1:]
+	}
+	return s
+}

Saved learning used: analyzer is reported to receive workspace-formatted paths; if that invariant is guaranteed, skip this change — otherwise apply the patch and add table-driven tests for:

  • "c:\repo\src\Foo.java" vs "C:\repo\src\Foo.java"
  • "\\server\share\dir\file.java" vs "//server/share/dir/file.java"
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
func normalizePath(path string) string {
cleanedPath := filepath.Clean(path)
volumeName := filepath.VolumeName(cleanedPath)
// make sure all volume names are uppercase
if volumeName != "" {
cleanedPath = strings.ToUpper(volumeName) + cleanedPath[len(volumeName):]
}
return filepath.ToSlash(cleanedPath) // prevent forward / backward slashes becoming a problem
}
func normalizePath(p string) string {
if p == "" {
return ""
}
s := filepath.ToSlash(filepath.Clean(p))
// If path looks like "C:/..." (Windows-style), normalize drive letter case on all OSes.
if len(s) >= 2 && s[1] == ':' {
s = strings.ToUpper(s[:1]) + s[1:]
}
return s
}
🤖 Prompt for AI Agents
In kai_analyzer_rpc/pkg/service/cache.go around lines 96 to 104, normalizePath
must be made cross-OS and handle empty input: first convert path separators with
filepath.ToSlash, return "" if the resulting string is empty or "."; then
canonicalize Windows drives and UNC paths by detecting Windows-style patterns on
any OS (e.g. regex for drive letter like ^[a-zA-Z]:/ and UNC like
^//[^/]+/[^/]+) and uppercase only the drive letter (for "c:/..." → "C:/..."),
preserving leading double slashes for UNC; finally return the canonical ToSlash
result. Add table-driven tests for the cases "c:\\repo\\src\\Foo.java" vs
"C:\\repo\\src\\Foo.java" and "\\\\server\\share\\dir\\file.java" vs
"//server/share/dir/file.java".

3 changes: 1 addition & 2 deletions kai_analyzer_rpc/pkg/service/pipe_analyzer.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,8 +79,7 @@ func NewPipeAnalyzer(ctx context.Context, limitIncidents, limitCodeSnips, contex
violationRulesets: violationRulesets,
discoveryCache: []konveyor.RuleSet{},
discoveryCacheMutex: sync.Mutex{},
cache: map[string][]cacheValue{},
cacheMutex: sync.RWMutex{},
cache: NewIncidentsCache(l),
location: location,
contextLines: contextLines,
rules: rules,
Expand Down
Loading