Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
# Build directories
build/
.worktrees/
cmake-build-*/
*.build/

Expand Down
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
- **Virtual Stack Tracking** for natural exception propagation
- **Work-Stealing Scheduler** with lock-free Chase-Lev deques
- **Dynamic Thread Pool** with runtime adjustment
- **Autoscaler** for automatic worker thread scaling under load
- **Synchronization Primitives**: mutex, semaphore, event, channel
- **Timers**: sleep_for, sleep_until, yield
- **I/O Backends**: io_uring (preferred) and epoll fallback
Expand Down
4 changes: 4 additions & 0 deletions examples/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,10 @@ add_executable(dynamic_threads dynamic_threads.cpp)
target_link_libraries(dynamic_threads PRIVATE elio)
target_link_options(dynamic_threads PRIVATE ${STATIC_LINK_FLAGS})

# Autoscaler demo: submits bursts of work and shows worker threads scaling up/down.
add_executable(autoscaler_example autoscaler_example.cpp)
target_link_libraries(autoscaler_example PRIVATE elio)
target_link_options(autoscaler_example PRIVATE ${STATIC_LINK_FLAGS})

add_executable(thread_affinity thread_affinity.cpp)
target_link_libraries(thread_affinity PRIVATE elio)
target_link_options(thread_affinity PRIVATE ${STATIC_LINK_FLAGS})
Expand Down
137 changes: 137 additions & 0 deletions examples/autoscaler_example.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,137 @@
#include <elio/elio.hpp>
#include <elio/runtime/autoscaler.hpp>

#include <atomic>
#include <chrono>
#include <functional>  // std::hash (used to seed the per-thread RNG)
#include <iostream>
#include <random>
#include <thread>      // std::this_thread, std::thread::id (previously pulled in transitively)

using namespace elio;

// Simulates one unit of work: blocks the current worker thread for a random
// 10-100ms interval, then bumps the shared completion counter.
// NOTE: the blocking sleep is intentional here — it keeps a worker busy so
// the autoscaler sees sustained load.
coro::task<void> workload_task(std::atomic<int>& counter) {
    // One RNG per worker thread, seeded from the thread id so workers differ.
    static thread_local std::mt19937 rng(std::hash<std::thread::id>{}(std::this_thread::get_id()));
    const int work_ms = std::uniform_int_distribution<int>(10, 100)(rng);
    std::this_thread::sleep_for(std::chrono::milliseconds(work_ms));

    counter.fetch_add(1, std::memory_order_relaxed);
    co_return;
}

int main() {
log::logger::instance().set_level(log::level::warning);

std::cout << "=== Elio Autoscaler Example ===" << std::endl;
std::cout << "Demonstrating automatic worker thread scaling" << std::endl;
std::cout << std::endl;

// Configure autoscaler
elio::runtime::autoscaler_config config;
config.tick_interval = std::chrono::milliseconds(200);
config.overload_threshold = 20;
config.idle_threshold = 5;
config.idle_delay = std::chrono::seconds(3);
config.min_workers = 2;
config.max_workers = 8;

// Create scheduler with minimum workers
runtime::scheduler sched(config.min_workers);
sched.start();

// Create and start autoscaler with default triggers
elio::runtime::autoscaler<runtime::scheduler> autoscaler(config);
autoscaler.start(&sched);

std::cout << "Initial workers: " << sched.num_threads() << std::endl;
std::cout << std::endl;

// Phase 1: High load - demonstrate scale-up
{
std::atomic<int> completed{0};

// Submit heavy workload
for (int i = 0; i < 2000; ++i) {
sched.spawn(workload_task(completed).release());
}

std::cout << "Phase 1: High load - expecting scale-up..." << std::endl;
std::cout << "----------------------------------------" << std::endl;

// Monitor autoscaler for 5 seconds
for (int i = 0; i < 25; ++i) {
std::this_thread::sleep_for(config.tick_interval);

size_t workers = sched.num_threads();
size_t pending = sched.pending_tasks();

if (i % 2 == 0) {
std::cout << " Workers: " << workers
<< ", Pending: " << pending
<< ", Completed: " << completed.load() << std::endl;
}
}
}

std::cout << std::endl;

// Phase 2: Even higher load
{
std::atomic<int> completed2{0};

// Submit even heavier workload
for (int i = 0; i < 3000; ++i) {
sched.spawn(workload_task(completed2).release());
}

std::cout << "Phase 2: Higher load - expecting more scale-up..." << std::endl;
std::cout << "-------------------------------------------" << std::endl;

for (int i = 0; i < 25; ++i) {
std::this_thread::sleep_for(config.tick_interval);

size_t pending = sched.pending_tasks();

if (i % 2 == 0) {
std::cout << " Workers: " << sched.num_threads()
<< ", Pending: " << pending << std::endl;
}
}
}

std::cout << std::endl;

// Phase 3: Low load - wait for scale-down
{
std::cout << "Phase 3: Low load - waiting for scale-down..." << std::endl;
std::cout << "------------------------------------------" << std::endl;

// Wait longer for idle_delay to trigger scale-down
for (int i = 0; i < 30; ++i) {
std::this_thread::sleep_for(config.tick_interval);

size_t workers = sched.num_threads();
size_t pending = sched.pending_tasks();

if (i % 2 == 0) {
std::cout << " Workers: " << workers
<< ", Pending: " << pending << std::endl;
}
}
}

std::cout << std::endl;

// Stop autoscaler
autoscaler.stop();

std::cout << "Final workers: " << sched.num_threads() << std::endl;

// Shutdown
sched.shutdown();

std::cout << std::endl;
std::cout << "=== Example completed ===" << std::endl;
std::cout << "Autoscaler automatically adjusted worker count based on load!" << std::endl;

return 0;
}
4 changes: 4 additions & 0 deletions include/elio/elio.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,10 @@
#include "runtime/async_main.hpp"
#include "runtime/affinity.hpp"
#include "runtime/serve.hpp"
#include "runtime/autoscaler_config.hpp"
#include "runtime/autoscaler_triggers.hpp"
#include "runtime/autoscaler_actions.hpp"
#include "runtime/autoscaler.hpp"

// I/O backend
#include "io/io_backend.hpp"
Expand Down
Loading