Add integration and unit tests for monitoring JSON API endpoints#373
Add integration and unit tests for monitoring JSON API endpoints #373 — gimballock wants to merge 4 commits into stratum-mining:main from
Conversation
85a3f7e to
f7f95d2
Compare
f7f95d2 to
f3c2e96
Compare
Shourya742
left a comment
There was a problem hiding this comment.
Shouldn't the monitoring module have abstractions for most of these?
| tokio::task::spawn_blocking(move || { | ||
| let (status, bytes) = crate::utils::http::make_get_request_with_status(&url, 5); | ||
| let body = String::from_utf8(bytes).expect("api response should be valid UTF-8"); | ||
| let json: serde_json::Value = serde_json::from_str(&body).unwrap_or_else(|e| { | ||
| panic!( | ||
| "Failed to parse JSON from {} (status {}): {}\nBody: {}", | ||
| url, status, e, body | ||
| ) | ||
| }); | ||
| (status, json) | ||
| }) | ||
| .await | ||
| .expect("spawn_blocking for fetch_api_with_status panicked") |
There was a problem hiding this comment.
Doesn't minreq provide an async variant?
There was a problem hiding this comment.
minreq does not provide an async variant - it's a synchronous HTTP client by design. We use tokio::task::spawn_blocking to run it without blocking the async runtime. If we want true async HTTP, we'd need to switch to reqwest or similar, but minreq was chosen for its simplicity and minimal dependencies in tests.
| /// Assert that an endpoint returns HTTP 404 with a JSON `{"error": "..."}` body. | ||
| pub async fn assert_api_not_found(monitoring_addr: SocketAddr, path: &str) { | ||
| let (status, json) = fetch_api_with_status(monitoring_addr, path).await; | ||
| assert_eq!( | ||
| status, 404, | ||
| "{} should return HTTP 404, got {} with body: {}", | ||
| path, status, json | ||
| ); | ||
| assert!( | ||
| json["error"].is_string(), | ||
| "{} should return JSON with 'error' field, got: {}", | ||
| path, | ||
| json | ||
| ); | ||
| } |
There was a problem hiding this comment.
It's not a good idea to move asserts into a helper method; it makes debugging painful when something goes wrong.
There was a problem hiding this comment.
Fixed - removed the assert helper methods. Assertions are now inlined directly in each test for better debugging.
| pub async fn assert_api_root(monitoring_addr: SocketAddr) { | ||
| let json = fetch_api_json(monitoring_addr, "/").await; | ||
| assert_eq!( | ||
| json["service"], "SRI Monitoring API", | ||
| "Root endpoint should return service name, got: {}", | ||
| json | ||
| ); | ||
| assert!( | ||
| json["endpoints"].is_object(), | ||
| "Root endpoint should list endpoints, got: {}", | ||
| json | ||
| ); | ||
| } | ||
|
|
||
| /// Assert that `/api/v1/global` returns a valid response with the expected structure. | ||
| /// Returns the parsed JSON for further assertions. | ||
| pub async fn assert_api_global(monitoring_addr: SocketAddr) -> serde_json::Value { | ||
| let json = fetch_api_json(monitoring_addr, "/api/v1/global").await; | ||
| assert!( | ||
| json["uptime_secs"].as_u64().is_some(), | ||
| "Global endpoint should contain uptime_secs, got: {}", | ||
| json | ||
| ); | ||
| json | ||
| } |
There was a problem hiding this comment.
Fixed - removed this helper as well.
| struct PoolWithSv2Miner { | ||
| _tp: integration_tests_sv2::template_provider::TemplateProvider, | ||
| _pool: pool_sv2::PoolSv2, | ||
| _sniffer: integration_tests_sv2::sniffer::Sniffer<'static>, | ||
| pool_mon: std::net::SocketAddr, | ||
| } | ||
|
|
||
| /// Spin up Pool + Sniffer + SV2 Mining Device, wait for a share to be accepted, | ||
| /// and return the pool monitoring address. | ||
| async fn setup_pool_with_sv2_miner() -> PoolWithSv2Miner { | ||
| let (tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); | ||
| let (pool, pool_addr, pool_monitoring) = | ||
| start_pool(sv2_tp_config(tp_addr), vec![], vec![], true).await; | ||
| let (sniffer, sniffer_addr) = start_sniffer("A", pool_addr, false, vec![], None); | ||
| start_mining_device_sv2(sniffer_addr, None, None, None, 1, None, true); | ||
|
|
||
| sniffer | ||
| .wait_for_message_type( | ||
| MessageDirection::ToUpstream, | ||
| MESSAGE_TYPE_SUBMIT_SHARES_STANDARD, | ||
| ) | ||
| .await; | ||
| sniffer | ||
| .wait_for_message_type( | ||
| MessageDirection::ToDownstream, | ||
| MESSAGE_TYPE_SUBMIT_SHARES_SUCCESS, | ||
| ) | ||
| .await; | ||
|
|
||
| let pool_mon = pool_monitoring.expect("pool monitoring should be enabled"); | ||
| PoolWithSv2Miner { | ||
| _tp: tp, | ||
| _pool: pool, | ||
| _sniffer: sniffer, | ||
| pool_mon, | ||
| } | ||
| } |
There was a problem hiding this comment.
We can remove these very specific abstractions for spawning servers; this quickly explodes considering all the combinations of different apps.
There was a problem hiding this comment.
Fixed - removed the topology abstractions. Each test now explicitly sets up its own topology, avoiding the combinatorial explosion problem.
| struct TproxyWithSv1Miner { | ||
| _tp: integration_tests_sv2::template_provider::TemplateProvider, | ||
| _pool: pool_sv2::PoolSv2, | ||
| _sniffer: integration_tests_sv2::sniffer::Sniffer<'static>, | ||
| _tproxy: translator_sv2::TranslatorSv2, | ||
| _minerd_process: integration_tests_sv2::sv1_minerd::MinerdProcess, | ||
| tproxy_mon: std::net::SocketAddr, | ||
| } | ||
|
|
||
| /// Spin up TP → Pool → Sniffer → tProxy → minerd, wait for a share to reach | ||
| /// the pool, and return the tProxy monitoring address. | ||
| async fn setup_tproxy_with_sv1_miner() -> TproxyWithSv1Miner { | ||
| let (tp, tp_addr) = start_template_provider(None, DifficultyLevel::Low); | ||
| let (pool, pool_addr, _pool_monitoring) = | ||
| start_pool(sv2_tp_config(tp_addr), vec![], vec![], false).await; | ||
| let (sniffer, sniffer_addr) = start_sniffer("0", pool_addr, false, vec![], None); | ||
| let (tproxy, tproxy_addr, tproxy_monitoring) = | ||
| start_sv2_translator(&[sniffer_addr], false, vec![], vec![], None, true).await; | ||
| let (minerd_process, _minerd_addr) = start_minerd(tproxy_addr, None, None, false).await; | ||
|
|
||
| sniffer | ||
| .wait_for_message_type( | ||
| MessageDirection::ToUpstream, | ||
| MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, | ||
| ) | ||
| .await; | ||
|
|
||
| let tproxy_mon = tproxy_monitoring.expect("tproxy monitoring should be enabled"); | ||
| TproxyWithSv1Miner { | ||
| _tp: tp, | ||
| _pool: pool, | ||
| _sniffer: sniffer, | ||
| _tproxy: tproxy, | ||
| _minerd_process: minerd_process, | ||
| tproxy_mon, | ||
| } | ||
| } |
There was a problem hiding this comment.
Let's remove this and be explicit in the tests.
There was a problem hiding this comment.
Fixed - removed this abstraction as well.
| assert!( | ||
| global["server"].is_object(), | ||
| "tProxy /api/v1/global should have server data, got: {}", | ||
| global["server"] | ||
| ); | ||
| assert!( | ||
| global["server"]["extended_channels"].as_u64().unwrap() >= 1, | ||
| "tProxy should have at least 1 extended upstream channel" | ||
| ); | ||
| // tProxy has SV1 clients | ||
| assert!( | ||
| global["sv1_clients"]["total_clients"].as_u64().unwrap() >= 1, | ||
| "tProxy should see at least 1 SV1 client" | ||
| ); | ||
| // tProxy has no SV2 downstreams | ||
| assert!( | ||
| global["sv2_clients"].is_null(), | ||
| "tProxy should have null sv2_clients, got: {}", | ||
| global["sv2_clients"] | ||
| ); |
There was a problem hiding this comment.
Let's define typed response structs instead of stringly-typed JSON access.
There was a problem hiding this comment.
Acknowledged - will add typed response structs.
| let client_id = clients["items"][0]["client_id"] | ||
| .as_u64() | ||
| .expect("client_id should be a number"); | ||
|
|
||
| // Fetch channels for this client | ||
| let channels = fetch_api_json( | ||
| topo.pool_mon, | ||
| &format!("/api/v1/clients/{}/channels", client_id), | ||
| ) | ||
| .await; | ||
| assert_eq!(channels["client_id"], client_id); | ||
| // Mining device opens a standard channel | ||
| let total_standard = channels["total_standard"] | ||
| .as_u64() | ||
| .expect("total_standard should be present"); | ||
| let total_extended = channels["total_extended"] | ||
| .as_u64() | ||
| .expect("total_extended should be present"); |
There was a problem hiding this comment.
Let's define typed response structs instead of stringly-typed JSON access.
There was a problem hiding this comment.
Acknowledged - will add typed response structs.
| let client_id = clients["items"][0]["client_id"] | ||
| .as_u64() | ||
| .expect("client_id should be a number"); | ||
|
|
||
| // Fetch single client | ||
| let client = fetch_api_json(topo.pool_mon, &format!("/api/v1/clients/{}", client_id)).await; | ||
| assert_eq!(client["client_id"], client_id); | ||
| assert!( | ||
| client["extended_channels_count"].as_u64().is_some() | ||
| || client["standard_channels_count"].as_u64().is_some(), | ||
| "Client should have channel counts, got: {}", | ||
| client | ||
| ); | ||
|
|
There was a problem hiding this comment.
Let's define typed response structs instead of stringly-typed JSON access.
There was a problem hiding this comment.
Acknowledged - will add typed response structs.
| assert_eq!(clients["total"], 1, "Pool should have 1 SV2 client"); | ||
| let items = clients["items"].as_array().expect("items should be array"); | ||
| assert_eq!(items.len(), 1); | ||
| assert!( |
There was a problem hiding this comment.
Let's define typed response structs instead of stringly-typed JSON access.
There was a problem hiding this comment.
Acknowledged - will add typed response structs.
| assert!( | ||
| global["server"].is_null(), | ||
| "Pool /api/v1/global should have null server (no upstream), got: {}", | ||
| global["server"] | ||
| ); | ||
| // Pool should report SV2 clients | ||
| assert_eq!( | ||
| global["sv2_clients"]["total_clients"], 1, | ||
| "Pool should see 1 SV2 client" | ||
| ); | ||
| // Pool has no SV1 clients | ||
| assert!( | ||
| global["sv1_clients"].is_null(), | ||
| "Pool /api/v1/global should have null sv1_clients, got: {}", | ||
| global["sv1_clients"] | ||
| ); |
There was a problem hiding this comment.
Let's define typed response structs instead of stringly-typed JSON access.
There was a problem hiding this comment.
Acknowledged - will add typed response structs.
Closes stratum-mining#329 Exercise every JSON REST API endpoint against live SV2 topologies and strengthen unit-test coverage for edge cases. Integration tests (monitoring_integration.rs): - 15 new tests covering /, /api/v1/global, /api/v1/server, /api/v1/server/channels, /api/v1/clients, /api/v1/clients/{id}, /api/v1/clients/{id}/channels, /api/v1/sv1/clients, and /api/v1/sv1/clients/{id} for both Pool and tProxy topologies - Topology setup helpers (PoolWithSv2Miner, TproxyWithSv1Miner) eliminate duplicated boilerplate across tests - 404 endpoints assert both HTTP status code and JSON error body via new assert_api_not_found helper Assertion helpers (prometheus_metrics_assertions.rs): - fetch_api_json: parse JSON API response - fetch_api_with_status: return (status_code, json) without panicking on non-2xx, enabling 404 testing - poll_until_api_field_gte: poll JSON endpoint until a field reaches a threshold (analogous to poll_until_metric_gte for Prometheus) - assert_api_root, assert_api_global: reusable structure checks - assert_api_not_found: verify HTTP 404 + error field - POLL_TIMEOUT constant to reduce repetition HTTP helper (utils.rs): - make_get_request_with_status: returns (status, body) without panicking on 4xx responses, unlike make_get_request Unit tests (http_server.rs): - 11 new tests for pagination boundaries (limit=0, offset beyond total, limit exceeding MAX_LIMIT), missing data sources returning 404, invalid/non-existent client IDs, SV1 data in global endpoint, and Prometheus metrics with no sources Dependencies: - Add serde_json to integration-tests for JSON parsing
f3c2e96 to
1f12145
Compare
…down calls - Replace verbose super::super::* paths with minified imports in http_server.rs - Add route constants and response types for unit tests - Remove assert helper methods (assert_api_root, assert_api_global, assert_api_not_found) - Remove topology setup abstractions (PoolWithSv2Miner, TproxyWithSv1Miner) - Add shutdown_all! calls to all JSON API integration tests - Inline all assertions directly in tests for better debugging
|
I think the test failure here is the target of PR #338; once we rebase onto that, this failure should go away. |
Shourya742
left a comment
There was a problem hiding this comment.
I still don't see any of the requested changes. Now we have some random files in the PR.
| @@ -0,0 +1 @@ | |||
| 81316 | |||
| @@ -0,0 +1 @@ | |||
| 81251 | |||
| @@ -0,0 +1 @@ | |||
| 81224 | |||
| @@ -0,0 +1 @@ | |||
| 81325 | |||
| # SV2 Monitoring Dashboard Mockups | ||
|
|
||
| ## Simple Dashboard (Tier 1 — Aggregate Only) | ||
|
|
| @@ -0,0 +1,171 @@ | |||
| #!/usr/bin/env bash | |||
| @@ -0,0 +1,440 @@ | |||
| # Pool Scale Testing: Unified Framework | |||
|
|
|||
| @@ -0,0 +1,505 @@ | |||
| ## Vardiff Profile | |||
Closes #329
Exercise every JSON REST API endpoint against live SV2 topologies and strengthen unit-test coverage for edge cases.
Integration tests (monitoring_integration.rs):
Assertion helpers (prometheus_metrics_assertions.rs):
HTTP helper (utils.rs):
Unit tests (http_server.rs):
Dependencies: