Merged
Changes from all commits (19 commits)
07f5a88  ci: add minimal testing requirements for CI pipeline (germa89, Feb 19, 2025)
f447a06  chore: update testing dependencies in pyproject.toml (germa89, Feb 19, 2025)
66dcc9b  feat: enhance log collection by adding JSONL file support and updatin… (germa89, Feb 19, 2025)
49d1ebb  chore: adding changelog file 3754.dependencies.md [dependabot-skip] (pyansys-ci-bot, Feb 19, 2025)
70e28d7  feat: add pytest summary script and update CI workflow to include tes… (germa89, Feb 19, 2025)
5effb6c  style: using click CLI for passing arguments. (germa89, Feb 19, 2025)
2457545  test: printing files (germa89, Feb 19, 2025)
8910fc4  feat: add step to uncompress log files in CI workflow (germa89, Feb 20, 2025)
40ab1f0  fix: adjust file search depth for uncompressing logs and improve dire… (germa89, Feb 20, 2025)
a774cfd  fix: ckecking cwd (germa89, Feb 20, 2025)
c2140f3  fix: update CI workflow to improve test summary output and formatting (germa89, Feb 20, 2025)
0e366fc  fix: enhance test summary output formatting and improve readability i… (germa89, Feb 20, 2025)
810ecf1  fix: remove unnecessary export of GITHUB_STEP_SUMMARY in CI workflow … (germa89, Feb 20, 2025)
8b80eb1  ci: adding all jobs (germa89, Feb 21, 2025)
7d43b25  ci: printing only 10 jobs (germa89, Feb 21, 2025)
7c559c1  fix: wrong parsing (germa89, Feb 21, 2025)
3202840  build: removing pytest-durations dependency (germa89, Feb 21, 2025)
7f9138d  Merge branch 'main' into feat/adding-pytest-durations-and-reportlog (germa89, Feb 24, 2025)
fbc9f59  Merge branch 'main' into feat/adding-pytest-durations-and-reportlog (germa89, Feb 25, 2025)
2 changes: 2 additions & 0 deletions .ci/collect_mapdl_logs_locals.sh
@@ -9,6 +9,8 @@ cp *pymapdl.apdl ./"$LOG_NAMES"/ || echo "No PYMAPDL APDL log files could be found"
 echo "Copying the profiling files..."
 cp -r prof ./"$LOG_NAMES"/prof || echo "No profile files could be found"
 
+echo "Copying the JSONL files..."
+cp *.jsonl ./"$LOG_NAMES"/ || echo "No JSONL files could be found"
 
 ls -la ./"$LOG_NAMES"
3 changes: 3 additions & 0 deletions .ci/collect_mapdl_logs_remote.sh
@@ -38,6 +38,9 @@ cp mapdl_launch_1.log ./"$LOG_NAMES"/mapdl_launch_1.log || echo "MAPDL launch do
 echo "Copying the profiling files..."
 cp -r prof ./"$LOG_NAMES"/prof || echo "No profile files could be found"
 
+echo "Copying the JSONL files..."
+cp *.jsonl ./"$LOG_NAMES"/ || echo "No JSONL files could be found"
+
 echo "Collecting file structure..."
 ls -R > ./"$LOG_NAMES"/files_structure.txt || echo "Failed to copy file structure to a file"
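The *.jsonl files collected by both scripts are, presumably, the report logs written by pytest-reportlog (the plugin pinned in the testing requirements below). A minimal sketch of the invocation that produces them; the log file name here is illustrative, not taken from this PR:

# Write one JSON record per test phase (setup/call/teardown) to a JSONL report log
pytest --report-log=pymapdl-tests.jsonl tests/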
251 changes: 251 additions & 0 deletions .ci/pytest_summary.py
@@ -0,0 +1,251 @@
import json
import os

import click
import numpy as np

BIG_WIDTH = 80
SMALL_WIDTH = 8


def find_json_files(base_dir):
"""Recursively find all JSON files in subdirectories."""
json_files = []
for root, _, files in os.walk(base_dir):
for file in files:
if file.endswith(".jsonl"):
json_files.append(os.path.join(root, file))
return json_files


def read_json_file(file_path):
"""Read a JSON file and return its content as a list of test configurations."""
with open(file_path, "r", encoding="utf-8") as f:
try:
data = [json.loads(line) for line in f]
return data
except json.JSONDecodeError as e:
print(f"Error reading {file_path}: {e}")
return []


def extract_tests_with_tags(json_files):
"""Extract test data and assign a tag based on the directory name."""
tests = []

for file_path in json_files:
directory_name = os.path.basename(os.path.dirname(file_path))
test_data = read_json_file(file_path)

for test in test_data:
if test.get("outcome", "").lower() == "passed" and test.get("duration"):
nodeid = test.get("nodeid")
if nodeid.startswith("tests/"):
nodeid = nodeid[6:]

when = test.get("when")
duration = test["duration"]
                tags = directory_name.split("-")
                # Drop the generic "logs" token from the directory-derived tags, if present
                if "logs" in tags:
                    tags.remove("logs")
id_ = f"{nodeid}({when})"

tests.append(
{
"tags": tags,
"id": id_,
"nodeid": nodeid,
"duration": duration,
"when": when,
}
)
return tests


def compute_statistics(tests):
"""Compute average duration and standard deviation per test ID."""
test_stats = {}

for test in tests:
test_id = test["id"]
if test_id not in test_stats:
test_stats[test_id] = {
"durations": [],
"n_tests": 0,
}

test_stats[test_id]["durations"].append(test["duration"])
test_stats[test_id]["n_tests"] += 1

summary = []

for test_id, data in test_stats.items():
durations = np.array(data["durations"])

if durations.size == 0:
continue

avg_duration = np.mean(durations)
std_dev = np.std(durations)

        # Stats excluding the slowest ~1% of runs (above the 99th percentile)
        mask_99 = durations < np.percentile(durations, 99)
if mask_99.sum() == 0:
avg_duration_minus_one = np.nan
std_dev_minus_one = np.nan
else:
avg_duration_minus_one = np.mean(durations[mask_99])
std_dev_minus_one = np.std(durations[mask_99])

        # Stats excluding the slowest ~25% of runs (above the 75th percentile)
        mask_75 = durations < np.percentile(durations, 75)
if mask_75.sum() == 0:
avg_duration_minus_34 = np.nan
std_dev_minus_34 = np.nan
else:
avg_duration_minus_34 = np.mean(durations[mask_75])
std_dev_minus_34 = np.std(durations[mask_75])

summary.append(
{
"id": test_id,
"n_tests": data["n_tests"],
"average_duration": avg_duration,
"std_dev": std_dev,
"avg_duration_minus_one": avg_duration_minus_one,
"std_dev_minus_one": std_dev_minus_one,
"avg_duration_minus_34": avg_duration_minus_34,
"std_dev_minus_34": std_dev_minus_34,
}
)

return summary


def print_table(data, keys, headers, title=""):
JUNCTION = "|"

def make_bold(s):
return click.style(s, bold=True)

h = [headers[0].ljust(BIG_WIDTH)]
h.extend([each.center(SMALL_WIDTH)[:SMALL_WIDTH] for each in headers[1:]])

len_h = len("| " + " | ".join(h) + " |")

sep = (
f"{JUNCTION}-"
+ f"-{JUNCTION}-".join(["-" * len(each) for each in h])
+ f"-{JUNCTION}"
)
top_sep = f"{JUNCTION}" + "-" * (len_h - 2) + f"{JUNCTION}"

if title:
# click.echo(top_sep)
click.echo(
"| " + make_bold(f"Top {len(data)} {title}".center(len_h - 4)) + " |"
)
click.echo(sep)

click.echo("| " + " | ".join([make_bold(each) for each in h]) + " |")
click.echo(sep)

for test in data:
s = []
for i, each_key in enumerate(keys):

if i == 0:
id_ = test[each_key]

                # Escape Markdown control characters so test IDs render literally
                id_ = (
                    id_.replace("(", "\\(")
                    .replace(")", "\\)")
                    .replace("[", "\\[")
                    .replace("]", "\\]")
                )
if len(id_) >= BIG_WIDTH:
id_ = id_[: BIG_WIDTH - 15] + "..." + id_[-12:]

s.append(f"{id_}".ljust(BIG_WIDTH)[0:BIG_WIDTH])

elif each_key == "n_tests":
s.append(f"{int(test[each_key])}".center(SMALL_WIDTH))
else:
if np.isnan(test[each_key]):
s.append("N/A".center(SMALL_WIDTH))
else:
s.append(f"{test[each_key]:.4f}".center(SMALL_WIDTH))

click.echo("| " + " | ".join(s) + " |")
# click.echo(sep)


def print_summary(summary, num=10):
"""Print the top N longest tests and the top N most variable tests."""
longest_tests = sorted(summary, key=lambda x: -x["average_duration"])[:num]
most_variable_tests = sorted(summary, key=lambda x: -x["std_dev"])[:num]

print(f"\n## Top {num} Longest Running Tests\n")
print_table(
longest_tests,
["id", "n_tests", "average_duration", "std_dev"],
["Test ID", "N. tests", "Avg", "STD"],
# "Longest Running Tests",
)

print("")
print(f"\n## Top {num} Most Variable Running Tests\n")
print_table(
most_variable_tests,
[
"id",
"n_tests",
"std_dev",
"average_duration",
"std_dev_minus_one",
"avg_duration_minus_one",
"std_dev_minus_34",
"avg_duration_minus_34",
],
[
"Test ID",
"N. tests",
"Std",
"Avg",
"Std-99%",
"Avg-99%",
"Std-75%",
"Avg-75%",
],
# "Most Variable Running Tests",
)


@click.command()
@click.option(
"--directory",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
default=None,
)
@click.option(
"--num", default=10, help="Number of top tests to display.", show_default=True
)
@click.option(
"--save-file",
default=None,
help="File to save the test durations. Default 'tests_durations.json'.",
show_default=True,
)
def analyze_tests(directory, num, save_file):
    """Summarize test durations from pytest-reportlog JSONL files."""
    directory = directory or os.getcwd()  # Default to the current working directory
json_files = find_json_files(directory)
tests = extract_tests_with_tags(json_files)

    if save_file:
        # Append one JSON object per line (JSONL) so the output stays machine-readable
        with open(save_file, "a+", encoding="utf-8") as f:
            for each_line in tests:
                f.write(json.dumps(each_line) + "\n")

summary = compute_statistics(tests)
print_summary(summary, num=num)


if __name__ == "__main__":
analyze_tests()
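For reference, a sketch of how the script above would be run locally, using the click options it defines; the directory name and output file are illustrative:

# Scan ./logs recursively for *.jsonl report logs, print the 10 slowest and
# most variable tests, and append the raw per-test records to durations.jsonl
python .ci/pytest_summary.py --directory ./logs --num 10 --save-file durations.jsonl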
@@ -2,6 +2,7 @@ pyfakefs==5.7.2
 pytest-cov==6.0.0
 pytest-profiling==1.8.1
 pytest-random-order==1.1.1
+pytest-reportlog==0.4.0
 pytest-rerunfailures==15.0
 pytest-timeout==2.3.1
 pytest==8.3.4
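For context, each line of a pytest-reportlog file is a standalone JSON object; below is a trimmed sketch of a TestReport record showing only the fields that pytest_summary.py reads (the nodeid is hypothetical and other fields are omitted):

{"$report_type": "TestReport", "nodeid": "tests/test_mapdl.py::test_basic", "when": "call", "outcome": "passed", "duration": 1.234}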