diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 29aab1e..0000000 --- a/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*~ -__pycache__/ diff --git a/EvoloPy/__init__.py b/EvoloPy/__init__.py index 8b13789..e5d398c 100644 --- a/EvoloPy/__init__.py +++ b/EvoloPy/__init__.py @@ -1 +1,29 @@ +""" +EvoloPy: An open source nature-inspired optimization library +""" +__version__ = "1.1.1" + +# Import main API functions for easy access +from EvoloPy.api import ( + available_optimizers, + available_benchmarks, + run_optimizer, + run_multiple_optimizers, + get_optimizer_class +) + +# Provide direct imports for convenience +import EvoloPy.optimizers as optimizers +import EvoloPy.benchmarks as benchmarks + +# Export main functions at the top level +__all__ = [ + 'available_optimizers', + 'available_benchmarks', + 'run_optimizer', + 'run_multiple_optimizers', + 'get_optimizer_class', + 'optimizers', + 'benchmarks', +] diff --git a/EvoloPy/api.py b/EvoloPy/api.py new file mode 100644 index 0000000..18135db --- /dev/null +++ b/EvoloPy/api.py @@ -0,0 +1,673 @@ +""" +High-level API functions for EvoloPy. + +This module provides simplified access to EvoloPy's core functionality +with a more Pythonic and user-friendly interface. 
+""" + +import numpy as np +from typing import Union, List, Dict, Any, Callable, Optional +from EvoloPy.optimizer import run as optimizer_run +from EvoloPy.solution import solution +import importlib +import time + +# Import parallel processing utilities +try: + from EvoloPy.parallel_utils import detect_hardware, get_optimal_process_count + PARALLEL_AVAILABLE = True +except ImportError: + PARALLEL_AVAILABLE = False + +def get_optimizer_map(): + """Get a dictionary mapping optimizer names to their functions.""" + optimizer_map = {} + optimizer_modules = [ + "PSO", "GWO", "MVO", "MFO", "CS", "BAT", + "WOA", "FFA", "SSA", "GA", "HHO", "SCA", + "JAYA", "DE" + ] + + for name in optimizer_modules: + try: + module = importlib.import_module(f"EvoloPy.optimizers.{name}") + optimizer_function = getattr(module, name) + optimizer_map[name] = optimizer_function + except (ImportError, AttributeError): + # Skip optimizers that aren't available + pass + + return optimizer_map + +def get_optimizer_class(optimizer_name: str) -> Callable: + """ + Get the optimizer class/function by name. + + Parameters: + optimizer_name (str): Name of the optimizer algorithm + + Returns: + Callable: The optimizer function + + Raises: + ValueError: If the optimizer does not exist + + Example: + >>> from EvoloPy.api import get_optimizer_class + >>> PSO = get_optimizer_class("PSO") + >>> result = PSO(objective_function, lb=-10, ub=10, dim=5, PopSize=30, iters=50) + """ + optimizer_map = get_optimizer_map() + + if optimizer_name not in optimizer_map: + raise ValueError(f"Optimizer '{optimizer_name}' not found. Available optimizers: {list(optimizer_map.keys())}") + + return optimizer_map[optimizer_name] + +def available_optimizers() -> List[str]: + """ + Get a list of all available optimization algorithms. + + Returns: + List[str]: List of optimizer names + + Example: + >>> from EvoloPy.api import available_optimizers + >>> print(available_optimizers()) + ['PSO', 'GWO', 'MVO', ...] 
+ """ + return list(get_optimizer_map().keys()) + +def available_benchmarks() -> List[str]: + """ + Get a list of all available benchmark functions. + + Returns: + List[str]: List of benchmark function names + + Example: + >>> from EvoloPy.api import available_benchmarks + >>> print(available_benchmarks()) + ['F1', 'F2', 'F3', ...] + """ + # List of all benchmark functions + return [ + "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "F10", + "F11", "F12", "F13", "F14", "F15", "F16", "F17", "F18", "F19", + "F20", "F21", "F22", "F23", "F24", "ackley", "rosenbrock", + "rastrigin", "griewank" + ] + +def run_optimizer( + optimizer: str, + objective_func: Union[str, Callable], + lb: Union[float, List[float]] = -100, + ub: Union[float, List[float]] = 100, + dim: int = 30, + population_size: int = 30, + iterations: int = 50, + num_runs: int = 1, + export_results: bool = True, + export_details: bool = True, + export_convergence: bool = True, + export_boxplot: bool = True, + display_plots: bool = True, + results_directory: Optional[str] = None, + enable_parallel: bool = False, + parallel_backend: str = 'auto', + num_processes: Optional[int] = None +) -> Dict[str, Any]: + """ + Run a single optimizer on a specified objective function. 
+ + Parameters: + optimizer (str): Name of the optimizer algorithm + objective_func (str or callable): Either a benchmark name (e.g., "F1") + or a custom objective function + lb (float or list): Lower bound for variables + ub (float or list): Upper bound for variables + dim (int): Problem dimension + population_size (int): Size of the population + iterations (int): Maximum number of iterations + num_runs (int): Number of independent runs + export_results (bool): Whether to export average results + export_details (bool): Whether to export detailed results + export_convergence (bool): Whether to export convergence plots + export_boxplot (bool): Whether to export boxplots + display_plots (bool): Whether to display plots directly in notebook (useful for Jupyter) + results_directory (str, optional): Directory to save results + enable_parallel (bool): Whether to enable parallel processing + parallel_backend (str): Parallel processing backend ('multiprocessing', 'cuda', 'auto') + num_processes (int, optional): Number of processes to use (None for auto-detection) + + Returns: + Dict[str, Any]: Results dictionary containing: + - 'best_solution': Best solution found + - 'best_fitness': Best fitness value + - 'convergence': Convergence history + - 'execution_time': Execution time + - 'plots': Dictionary of matplotlib figures (if display_plots=True) + + Example: + >>> from EvoloPy.api import run_optimizer + >>> result = run_optimizer("PSO", "F1", population_size=50, iterations=100) + >>> print(f"Best fitness: {result['best_fitness']}") + >>> print(f"Execution time: {result['execution_time']} seconds") + """ + optimizer_map = get_optimizer_map() + + #check if optimizer exists + if optimizer not in optimizer_map: + raise ValueError(f"Optimizer '{optimizer}' not found. Available optimizers: {available_optimizers()}") + + # Check parallel configuration + if enable_parallel and not PARALLEL_AVAILABLE: + print("Warning: Parallel processing requested but not available. 
Installing psutil package is required.") + enable_parallel = False + + # Handle boxplot export based on number of runs + # Always enable boxplot when multiple runs are performed + if num_runs > 1: + export_boxplot = True + + # Create organized results directory if not provided + if results_directory is None: + timestamp = time.strftime("%Y-%m-%d-%H-%M-%S") + results_directory = f"results_{timestamp}/{optimizer}/" + + # Ensure directory has trailing slash + if not results_directory.endswith('/'): + results_directory += '/' + + if isinstance(objective_func, str): + if objective_func not in available_benchmarks(): + raise ValueError(f"Benchmark '{objective_func}' not found. Available benchmarks: {available_benchmarks()}") + + # Create function-specific subdirectory for better organization + func_results_dir = f"{results_directory}{objective_func}/" + + params = {"PopulationSize": population_size, "Iterations": iterations} + export_flags = { + "Export_avg": export_results, + "Export_details": export_details, + "Export_convergence": export_convergence, + "Export_boxplot": export_boxplot + } + + # Save configuration summary + import os + from pathlib import Path + Path(func_results_dir).mkdir(parents=True, exist_ok=True) + + with open(os.path.join(func_results_dir, "config.txt"), "w") as f: + f.write(f"Optimizer: {optimizer}\n") + f.write(f"Function: {objective_func}\n") + f.write(f"Dimension: {dim}\n") + f.write(f"Population Size: {population_size}\n") + f.write(f"Iterations: {iterations}\n") + f.write(f"Number of Runs: {num_runs}\n") + f.write(f"Lower Bound: {lb}\n") + f.write(f"Upper Bound: {ub}\n") + f.write(f"Parallel: {enable_parallel}\n") + if enable_parallel: + f.write(f"Parallel Backend: {parallel_backend}\n") + f.write(f"Processes: {num_processes or 'auto'}\n") + + results = optimizer_run( + [optimizer], + [objective_func], + num_runs, + params, + export_flags, + func_results_dir, + enable_parallel, + parallel_backend, + num_processes + ) + + # Process results 
+ plots = {} + + # Generate and display plots if requested + if display_plots: + try: + import matplotlib.pyplot as plt + import numpy as np + + # Create plots for display in notebook + if hasattr(results, 'convergence'): + # Convergence plot for single result + fig_conv = plt.figure(figsize=(10, 6)) + plt.plot(range(1, len(results.convergence)+1), results.convergence) + plt.title(f'Convergence Curve: {optimizer} on {objective_func}') + plt.xlabel('Iteration') + plt.ylabel('Fitness') + plt.grid(True) + plots['convergence'] = fig_conv + + elif isinstance(results, list) and len(results) > 0 and hasattr(results[0], 'convergence'): + # Multiple runs - show average convergence + convergence_data = np.array([r.convergence for r in results]) + avg_convergence = np.mean(convergence_data, axis=0) + std_convergence = np.std(convergence_data, axis=0) + + fig_conv = plt.figure(figsize=(10, 6)) + iterations = range(1, len(avg_convergence)+1) + plt.plot(iterations, avg_convergence, label='Mean') + plt.fill_between( + iterations, + avg_convergence - std_convergence, + avg_convergence + std_convergence, + alpha=0.3, label='Std Dev' + ) + plt.title(f'Average Convergence: {optimizer} on {objective_func} ({num_runs} runs)') + plt.xlabel('Iteration') + plt.ylabel('Fitness') + plt.grid(True) + plt.legend() + plots['avg_convergence'] = fig_conv + + # Boxplot for multiple runs + if num_runs > 1: + final_fitnesses = [r.best_score if hasattr(r, 'best_score') else r.convergence[-1] for r in results] + fig_box = plt.figure(figsize=(8, 6)) + plt.boxplot(final_fitnesses, labels=[optimizer]) + plt.title(f'Final Fitness Distribution: {optimizer} on {objective_func}') + plt.ylabel('Fitness Value') + plt.grid(True, axis='y') + plots['boxplot'] = fig_box + # Store the raw data for potential reuse + plots['boxplot_data'] = final_fitnesses + except Exception as e: + print(f"Warning: Could not generate plots for display: {e}") + + # Process and return results + if hasattr(results, 'bestIndividual'): + 
# Single result object returned + return { + 'best_solution': results.bestIndividual, + 'best_fitness': results.best_score if hasattr(results, 'best_score') else None, + 'convergence': results.convergence, + 'execution_time': results.executionTime, + 'plots': plots if display_plots else None + } + elif results and isinstance(results, list) and hasattr(results[0], 'bestIndividual'): + # Pick the best result if a list was returned + best_result = min(results, key=lambda x: getattr(x, 'best_score', float('inf'))) + return { + 'best_solution': best_result.bestIndividual, + 'best_fitness': best_result.best_score if hasattr(best_result, 'best_score') else None, + 'convergence': best_result.convergence, + 'execution_time': best_result.executionTime, + 'plots': plots if display_plots else None + } + else: + # Fall back to empty result if structure is unexpected + return { + 'best_solution': None, + 'best_fitness': None, + 'convergence': None, + 'execution_time': None, + 'plots': plots if display_plots else None + } + + # Handle callable objective functions + elif callable(objective_func): + # Get the optimizer function + optimizer_func = optimizer_map[optimizer] + + # Create function-specific subdirectory for better organization + func_name = getattr(objective_func, '__name__', 'custom_function') + func_results_dir = f"{results_directory}{func_name}/" + + from pathlib import Path + Path(func_results_dir).mkdir(parents=True, exist_ok=True) + + if enable_parallel and num_runs > 1: + # Import parallel utils if needed + from EvoloPy.parallel_utils import run_optimizer_parallel + + # Execute multiple runs in parallel + results = run_optimizer_parallel( + optimizer_func=optimizer_func, + objf=objective_func, + lb=lb, + ub=ub, + dim=dim, + PopSize=population_size, + iters=iterations, + num_runs=num_runs, + parallel_backend=parallel_backend, + num_processes=num_processes + ) + + # Return the best result + best_run = min(results, key=lambda x: objective_func(x.bestIndividual)) + + 
# Generate boxplot if enabled and we have multiple runs + if export_boxplot and num_runs > 1: + try: + from EvoloPy import plot_boxplot + fitness_values = [objective_func(r.bestIndividual) for r in results] + plot_boxplot.generateBoxPlot([optimizer], [func_name], [fitness_values], func_results_dir) + except Exception as e: + print(f"Warning: Could not generate boxplot: {e}") + + # Create plots for display in notebook + plots = {} + if display_plots: + try: + import matplotlib.pyplot as plt + import numpy as np + + # Convergence plot + convergence_data = np.array([r.convergence for r in results]) + avg_convergence = np.mean(convergence_data, axis=0) + std_convergence = np.std(convergence_data, axis=0) + + fig_conv = plt.figure(figsize=(10, 6)) + iterations = range(1, len(avg_convergence)+1) + plt.plot(iterations, avg_convergence, label='Mean') + plt.fill_between( + iterations, + avg_convergence - std_convergence, + avg_convergence + std_convergence, + alpha=0.3, label='Std Dev' + ) + plt.title(f'Average Convergence: {optimizer} on {func_name} ({num_runs} runs)') + plt.xlabel('Iteration') + plt.ylabel('Fitness') + plt.grid(True) + plt.legend() + plots['avg_convergence'] = fig_conv + + # Boxplot for multiple runs + if num_runs > 1: + fitness_values = [objective_func(r.bestIndividual) for r in results] + fig_box = plt.figure(figsize=(8, 6)) + plt.boxplot(fitness_values, labels=[optimizer]) + plt.title(f'Final Fitness Distribution: {optimizer} on {func_name}') + plt.ylabel('Fitness Value') + plt.grid(True, axis='y') + plots['boxplot'] = fig_box + # Store the raw data for potential reuse + plots['boxplot_data'] = fitness_values + except Exception as e: + print(f"Warning: Could not generate plots for display: {e}") + + return { + 'best_solution': best_run.bestIndividual, + 'best_fitness': objective_func(best_run.bestIndividual), + 'convergence': best_run.convergence, + 'execution_time': sum(r.executionTime for r in results)/len(results), # Average time + 'plots': plots if 
display_plots else None + } + else: + # Run the optimization directly (single run) + result = optimizer_func(objective_func, lb, ub, dim, population_size, iterations) + + # Return the results + return { + 'best_solution': result.bestIndividual, + 'best_fitness': objective_func(result.bestIndividual), + 'convergence': result.convergence, + 'execution_time': result.executionTime + } + + else: + raise TypeError("objective_func must be either a string (benchmark name) or a callable function") + +def run_multiple_optimizers( + optimizers: List[str], + objective_funcs: List[Union[str, Callable]], + lb: Union[float, List[float]] = -100, + ub: Union[float, List[float]] = 100, + dim: int = 30, + population_size: int = 30, + iterations: int = 50, + num_runs: int = 1, + export_results: bool = True, + export_details: bool = True, + export_convergence: bool = True, + export_boxplot: bool = True, + display_plots: bool = True, + results_directory: Optional[str] = None, + enable_parallel: bool = False, + parallel_backend: str = 'auto', + num_processes: Optional[int] = None +) -> Dict[str, Dict[str, Dict[str, Any]]]: + """ + Run multiple optimizers on multiple objective functions. + + This function allows running multiple optimization algorithms on multiple benchmark + functions and returns structured results. 
+ + Parameters: + optimizers (List[str]): List of optimizer names + objective_funcs (List[str]): List of benchmark function names + lb (float or list): Lower bound for variables + ub (float or list): Upper bound for variables + dim (int): Problem dimension + population_size (int): Size of the population + iterations (int): Maximum number of iterations + num_runs (int): Number of independent runs + export_results (bool): Whether to export average results + export_details (bool): Whether to export detailed results + export_convergence (bool): Whether to export convergence plots + export_boxplot (bool): Whether to export boxplots + display_plots (bool): Whether to display plots directly in notebook (useful for Jupyter) + results_directory (str, optional): Directory to save results + enable_parallel (bool): Whether to enable parallel processing + parallel_backend (str): Parallel processing backend ('multiprocessing', 'cuda', 'auto') + num_processes (int, optional): Number of processes to use (None for auto-detection) + + Returns: + Dict[str, Dict[str, Dict[str, Any]]]: Nested dictionary of results: + {optimizer_name: {objective_name: {result_data}}} + + Example: + >>> from EvoloPy.api import run_multiple_optimizers + >>> results = run_multiple_optimizers( + ... optimizers=["PSO", "GWO"], + ... objective_funcs=["F1", "F5"], + ... population_size=30, + ... iterations=50 + ... ) + >>> # Access specific results + >>> for opt_name, opt_results in results.items(): + ... for func_name, func_results in opt_results.items(): + ... print(f"{opt_name} on {func_name}: {func_results}") + """ + # Required imports + import numpy as np + + optimizer_map = get_optimizer_map() + + # Validate inputs + for opt in optimizers: + if opt not in optimizer_map: + raise ValueError(f"Optimizer '{opt}' not found. 
Available optimizers: {available_optimizers()}") + + for func in objective_funcs: + if isinstance(func, str) and func not in available_benchmarks(): + raise ValueError(f"Benchmark '{func}' not found. Available benchmarks: {available_benchmarks()}") + + # Only support string benchmark functions for now + if not all(isinstance(func, str) for func in objective_funcs): + raise TypeError("For multiple optimizers, all objective_funcs must be benchmark names (strings)") + + # Check parallel configuration + if enable_parallel and not PARALLEL_AVAILABLE: + print("Warning: Parallel processing requested but not available. Installing psutil package is required.") + enable_parallel = False + + # Handle boxplot export based on number of runs + # Always enable boxplot when multiple runs are performed + if num_runs > 1: + export_boxplot = True + + # Create organized results directory if not provided + if results_directory is None: + timestamp = time.strftime("%Y-%m-%d-%H-%M-%S") + results_directory = f"results_{timestamp}/multiple_optimizers/" + + # Ensure directory has trailing slash + if not results_directory.endswith('/'): + results_directory += '/' + + # Create a dictionary to store all results + all_results = {} + all_convergence_data = {} + all_fitness_data = {} + + # Run each optimizer for each objective function + for opt in optimizers: + all_results[opt] = {} + all_convergence_data[opt] = {} + all_fitness_data[opt] = {} + + for func in objective_funcs: + result = run_optimizer( + optimizer=opt, + objective_func=func, + lb=lb, + ub=ub, + dim=dim, + population_size=population_size, + iterations=iterations, + num_runs=num_runs, + export_results=export_results, + export_details=export_details, + export_convergence=export_convergence, + export_boxplot=export_boxplot, + display_plots=False, # We'll create our own plots for multiple optimizers + results_directory=f"{results_directory}{opt}/", + enable_parallel=enable_parallel, + parallel_backend=parallel_backend, + 
num_processes=num_processes + ) + + # Store the result for this optimizer and function + all_results[opt][func] = result + + # Store convergence data for comparison plots + if isinstance(result['convergence'], list) or isinstance(result['convergence'], np.ndarray): + all_convergence_data[opt][func] = result['convergence'] + + # Store fitness data for comparison plots + all_fitness_data[opt][func] = result['best_fitness'] + + # Create comparison plots if requested + plots = {} + if display_plots: + try: + import matplotlib.pyplot as plt + import numpy as np + + # Plot convergence comparison for each objective function + for func in objective_funcs: + func_name = func if isinstance(func, str) else getattr(func, '__name__', 'custom_function') + + # Convergence comparison plot + fig_conv = plt.figure(figsize=(12, 7)) + for opt in optimizers: + if func in all_convergence_data[opt]: + plt.plot(range(1, len(all_convergence_data[opt][func])+1), + all_convergence_data[opt][func], + label=opt) + + plt.title(f'Convergence Comparison on {func_name}') + plt.xlabel('Iteration') + plt.ylabel('Fitness') + plt.grid(True) + plt.legend() + plots[f'convergence_{func_name}'] = fig_conv + + # Performance comparison boxplot if multiple runs + if num_runs > 1: + # Get fitness values for all optimizers for this function + performance_data = [] + labels = [] + + for opt in optimizers: + if func in all_results[opt] and 'plots' in all_results[opt][func] and all_results[opt][func]['plots']: + if 'boxplot_data' in all_results[opt][func]['plots']: + performance_data.append(all_results[opt][func]['plots']['boxplot_data']) + labels.append(opt) + + if performance_data: + fig_box = plt.figure(figsize=(10, 7)) + plt.boxplot(performance_data, labels=labels) + plt.title(f'Performance Comparison on {func_name}') + plt.ylabel('Fitness Value') + plt.grid(True, axis='y') + plots[f'boxplot_{func_name}'] = fig_box + + # Create a summary performance comparison across all functions + if len(objective_funcs) > 
1: + fig_summary = plt.figure(figsize=(14, 8)) + + # Extract best fitness for each optimizer and function + data = [] + x_labels = [] + + for i, func in enumerate(objective_funcs): + func_name = func if isinstance(func, str) else getattr(func, '__name__', 'custom_function') + x_labels.append(func_name) + + for opt in optimizers: + if i == 0: # First iteration, initialize the lists + data.append([]) + + if func in all_results[opt]: + # Add the best fitness for this function + data[optimizers.index(opt)].append(all_results[opt][func]['best_fitness']) + + # Create the bar chart + x = np.arange(len(objective_funcs)) + width = 0.8 / len(optimizers) + + for i, opt in enumerate(optimizers): + plt.bar(x + (i - len(optimizers)/2 + 0.5) * width, data[i], width, label=opt) + + plt.xlabel('Objective Function') + plt.ylabel('Best Fitness Value') + plt.title('Optimizer Performance Comparison') + plt.xticks(x, x_labels) + plt.legend() + plt.grid(True, axis='y') + + plots['performance_summary'] = fig_summary + + except Exception as e: + print(f"Warning: Could not generate comparison plots: {e}") + + # Add plots to the result dictionary + all_results['plots'] = plots if display_plots else None + + return all_results + +def get_hardware_info() -> Dict[str, Any]: + """ + Get information about available hardware for parallel processing. + + Returns: + Dict[str, Any]: Dictionary containing hardware information: + - cpu_count: Number of CPU cores + - cpu_threads: Number of CPU threads + - ram_gb: Available RAM in GB + - gpu_available: Whether CUDA GPU is available + - gpu_count: Number of CUDA GPUs + - gpu_names: List of GPU names + - gpu_memory: List of GPU memory in GB + + Example: + >>> from EvoloPy.api import get_hardware_info + >>> hw_info = get_hardware_info() + >>> print(f"CPU cores: {hw_info['cpu_count']}") + >>> if hw_info['gpu_available']: + ... 
print(f"GPU: {hw_info['gpu_names'][0]}") + """ + if not PARALLEL_AVAILABLE: + raise ImportError("Hardware detection requires the psutil package. Install with: pip install psutil") + return detect_hardware() \ No newline at end of file diff --git a/EvoloPy/benchmarks.py b/EvoloPy/benchmarks.py index 9dad5cd..f3276e7 100644 --- a/EvoloPy/benchmarks.py +++ b/EvoloPy/benchmarks.py @@ -9,120 +9,97 @@ import numpy as np import math +from functools import reduce +from typing import Union, List, Tuple, Callable -# define the function blocks +# Define the function blocks - optimized versions def prod(it): - p = 1 - for n in it: - p *= n - return p - + """Optimized product function using reduce""" + return reduce(lambda x, y: x * y, it, 1) def Ufun(x, a, k, m): - y = k * ((x - a) ** m) * (x > a) + k * ((-x - a) ** m) * (x < (-a)) - return y - + """Penalty function""" + return k * ((x - a) ** m) * (x > a) + k * ((-x - a) ** m) * (x < (-a)) def F1(x): - s = np.sum(x ** 2) - return s - + """Sphere function""" + return np.sum(x ** 2) def F2(x): - o = sum(abs(x)) + prod(abs(x)) - return o - + """Sum of absolute and product of absolute values""" + return np.sum(np.abs(x)) + prod(np.abs(x)) def F3(x): - dim = len(x) + 1 - o = 0 - for i in range(1, dim): - o = o + (np.sum(x[0:i])) ** 2 - return o - + """Sum of squared sums""" + result = 0 + for i in range(1, len(x) + 1): + result += (np.sum(x[0:i])) ** 2 + return result def F4(x): - o = max(abs(x)) - return o - + """Maximum absolute value""" + return np.max(np.abs(x)) def F5(x): + """Rosenbrock function""" dim = len(x) - o = np.sum( - 100 * (x[1:dim] - (x[0 : dim - 1] ** 2)) ** 2 + (x[0 : dim - 1] - 1) ** 2 - ) - return o - + return np.sum(100 * (x[1:dim] - (x[0:dim-1] ** 2)) ** 2 + (x[0:dim-1] - 1) ** 2) def F6(x): - o = np.sum(abs((x + 0.5)) ** 2) - return o - + """Shifted absolute square function""" + return np.sum(np.abs(x + 0.5) ** 2) def F7(x): + """Sum of weighted fourth powers with noise""" dim = len(x) - w = np.arange(1, 
dim + 1) # create an array from 1 to dim - o = np.sum(w * (x ** 4)) + np.random.uniform(0, 1) - return o - + w = np.arange(1, dim + 1) + return np.sum(w * (x ** 4)) + np.random.uniform(0, 1) def F8(x): - o = sum(-x * (np.sin(np.sqrt(abs(x))))) - return o - + """Sum of negative products of sine of square root of absolute value""" + return np.sum(-x * np.sin(np.sqrt(np.abs(x)))) def F9(x): + """Rastrigin function""" dim = len(x) - o = np.sum(x ** 2 - 10 * np.cos(2 * math.pi * x)) + 10 * dim - return o - + return np.sum(x ** 2 - 10 * np.cos(2 * np.pi * x)) + 10 * dim def F10(x): + """Ackley function""" dim = len(x) - o = ( - -20 * np.exp(-0.2 * np.sqrt(np.sum(x ** 2) / dim)) - - np.exp(np.sum(np.cos(2 * math.pi * x)) / dim) - + 20 - + np.exp(1) - ) - return o - + return (-20 * np.exp(-0.2 * np.sqrt(np.sum(x ** 2) / dim)) - + np.exp(np.sum(np.cos(2 * np.pi * x)) / dim) + + 20 + np.exp(1)) def F11(x): + """Griewank function""" dim = len(x) - w = [i for i in range(len(x))] - w = [i + 1 for i in w] - o = np.sum(x ** 2) / 4000 - prod(np.cos(x / np.sqrt(w))) + 1 - return o - + w = np.arange(1, dim + 1) + return np.sum(x ** 2) / 4000 - np.prod(np.cos(x / np.sqrt(w))) + 1 def F12(x): + """Penalized function 1""" dim = len(x) - o = (math.pi / dim) * ( - 10 * ((np.sin(math.pi * (1 + (x[0] + 1) / 4))) ** 2) - + np.sum( - (((x[: dim - 1] + 1) / 4) ** 2) - * (1 + 10 * ((np.sin(math.pi * (1 + (x[1 :] + 1) / 4)))) ** 2) - ) - + ((x[dim - 1] + 1) / 4) ** 2 - ) + np.sum(Ufun(x, 10, 100, 4)) - return o - + y = 1 + (x + 1) / 4 + + term1 = 10 * (np.sin(np.pi * y[0])) ** 2 + term2 = np.sum((y[0:dim-1] - 1) ** 2 * (1 + 10 * (np.sin(np.pi * y[1:dim])) ** 2)) + term3 = (y[dim-1] - 1) ** 2 + + pi_n = np.pi * dim + return (pi_n / 10) * (term1 + term2 + term3) + np.sum(Ufun(x, 10, 100, 4)) def F13(x): - if x.ndim==1: - x = x.reshape(1,-1) - - o = 0.1 * ( - (np.sin(3 * np.pi * x[:,0])) ** 2 - + np.sum( - (x[:,:-1] - 1) ** 2 - * (1 + (np.sin(3 * np.pi * x[:,1:])) ** 2), axis=1 - ) - + ((x[:,-1] - 
1) ** 2) * (1 + (np.sin(2 * np.pi * x[:,-1])) ** 2) - ) + np.sum(Ufun(x, 5, 100, 4)) - return o - + """Penalized function 2""" + if x.ndim == 1: + x = x.reshape(1, -1) + + term1 = (np.sin(3 * np.pi * x[:, 0])) ** 2 + term2 = np.sum((x[:, :-1] - 1) ** 2 * (1 + (np.sin(3 * np.pi * x[:, 1:])) ** 2), axis=1) + term3 = ((x[:, -1] - 1) ** 2) * (1 + (np.sin(2 * np.pi * x[:, -1])) ** 2) + + result = 0.1 * (term1 + term2 + term3) + np.sum(Ufun(x, 5, 100, 4)) + return result def F14(x): aS = [ diff --git a/EvoloPy/cli.py b/EvoloPy/cli.py new file mode 100644 index 0000000..469128f --- /dev/null +++ b/EvoloPy/cli.py @@ -0,0 +1,303 @@ +#!/usr/bin/env python +import argparse +import json +import sys +import time +from typing import Dict, List, Any, Optional + +from EvoloPy.api import ( + available_optimizers, + available_benchmarks, + run_optimizer, + run_multiple_optimizers, + get_hardware_info, + PARALLEL_AVAILABLE +) + + +def parse_args() -> argparse.Namespace: + """Parse command line arguments for the EvoloPy CLI.""" + parser = argparse.ArgumentParser( + description="EvoloPy: A Python library for nature-inspired optimization algorithms", + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + # Add required arguments + parser.add_argument( + "--optimizer", "-o", type=str, + help="Optimizer to use. Use 'list' to see available optimizers." + ) + + parser.add_argument( + "--function", "-f", type=str, + help="Objective function to optimize. Use 'list' to see available functions." + ) + + # Add optional arguments with defaults + parser.add_argument( + "--pop-size", "-p", type=int, default=30, + help="Population size" + ) + + parser.add_argument( + "--iterations", "-i", type=int, default=50, + help="Number of iterations" + ) + + parser.add_argument( + "--dim", "-d", type=int, default=10, + help="Problem dimension" + ) + + parser.add_argument( + "--lb", type=str, default="-100", + help="Lower bound of search space. 
Can be a scalar (same for all dimensions) or a comma-separated list" + ) + + parser.add_argument( + "--ub", type=str, default="100", + help="Upper bound of search space. Can be a scalar (same for all dimensions) or a comma-separated list" + ) + + parser.add_argument( + "--runs", "-r", type=int, default=1, + help="Number of independent runs" + ) + + parser.add_argument( + "--results-dir", type=str, default=None, + help="Directory to save results" + ) + + parser.add_argument( + "--list", action="store_true", + help="List available optimizers and benchmark functions" + ) + + # Multi-optimizer and multi-function mode + parser.add_argument( + "--multi", action="store_true", + help="Run multiple optimizers and/or functions (comma-separated lists)" + ) + + # Parallel processing options + parallel_group = parser.add_argument_group('Parallel Processing') + + parallel_group.add_argument( + "--parallel", action="store_true", + help="Enable parallel processing for multiple runs" + ) + + parallel_group.add_argument( + "--backend", type=str, default="auto", choices=["auto", "multiprocessing", "cuda"], + help="Parallel processing backend to use" + ) + + parallel_group.add_argument( + "--processes", type=int, default=None, + help="Number of parallel processes to use (default: auto-detect)" + ) + + parallel_group.add_argument( + "--hw-info", action="store_true", + help="Display hardware information and exit" + ) + + # Export options + export_group = parser.add_argument_group('Export Options') + export_group.add_argument( + "--no-export-results", action="store_true", + help="Disable exporting average results" + ) + + export_group.add_argument( + "--no-export-details", action="store_true", + help="Disable exporting detailed results" + ) + + export_group.add_argument( + "--no-export-convergence", action="store_true", + help="Disable exporting convergence plots" + ) + + export_group.add_argument( + "--no-export-boxplot", action="store_true", + help="Disable exporting boxplots" + ) + + 
return parser.parse_args() + + +def display_available_options() -> None: + """Display available optimizers and benchmark functions.""" + print("Available Optimizers:") + for opt in available_optimizers(): + print(f" - {opt}") + + print("\nAvailable Benchmark Functions:") + for func in available_benchmarks(): + print(f" - {func}") + + +def display_hardware_info() -> None: + """Display hardware information for parallel processing.""" + if not PARALLEL_AVAILABLE: + print("Error: Hardware detection requires the psutil package.") + print("Install with: pip install psutil") + sys.exit(1) + + try: + hw_info = get_hardware_info() + + print("Hardware Information:") + print(f"CPU cores: {hw_info['cpu_count']}") + print(f"CPU threads: {hw_info['cpu_threads']}") + print(f"RAM: {hw_info['ram_gb']:.2f} GB") + + if hw_info['gpu_available']: + print(f"CUDA GPUs available: {hw_info['gpu_count']}") + for i, (name, mem) in enumerate(zip(hw_info['gpu_names'], hw_info['gpu_memory'])): + print(f" GPU {i}: {name} ({mem:.2f} GB)") + else: + print("CUDA GPUs: None detected") + except Exception as e: + print(f"Error detecting hardware: {e}") + sys.exit(1) + + +def run_cli() -> None: + """Run the command-line interface for EvoloPy.""" + args = parse_args() + + # Handle hardware info display + if args.hw_info: + display_hardware_info() + sys.exit(0) + + # Handle listing available options + if args.list: + display_available_options() + sys.exit(0) + + # Check for required arguments + if args.optimizer == "list" or args.function == "list": + display_available_options() + sys.exit(0) + + if not args.optimizer: + print("Error: Optimizer must be specified. Use --list to see options.") + sys.exit(1) + + if not args.function: + print("Error: Objective function must be specified. 
Use --list to see options.") + sys.exit(1) + + # Process bounds - parse string values to handle lists + try: + # Check if lb contains commas, which indicates a list + if ',' in args.lb: + lb = [float(x.strip()) for x in args.lb.split(',')] + else: + lb = float(args.lb) + + # Check if ub contains commas, which indicates a list + if ',' in args.ub: + ub = [float(x.strip()) for x in args.ub.split(',')] + else: + ub = float(args.ub) + except ValueError: + print("Error: Lower and upper bounds must be numeric values or comma-separated numeric values.") + sys.exit(1) + + # Process export flags (default is True, flag disables) + export_results = not args.no_export_results + export_details = not args.no_export_details + export_convergence = not args.no_export_convergence + export_boxplot = not args.no_export_boxplot + + # Handle multi-mode + if args.multi: + optimizers = [o.strip() for o in args.optimizer.split(",")] + functions = [f.strip() for f in args.function.split(",")] + + print(f"Running {len(optimizers)} optimizer(s) on {len(functions)} function(s)...") + + # Display parallel processing info if enabled + if args.parallel: + print(f"Parallel processing enabled with {args.backend} backend") + if args.processes: + print(f"Using {args.processes} processes") + else: + print("Auto-detecting optimal number of processes") + + start_time = time.time() + + results = run_multiple_optimizers( + optimizers=optimizers, + objective_funcs=functions, + population_size=args.pop_size, + iterations=args.iterations, + dim=args.dim, + lb=lb, + ub=ub, + num_runs=args.runs, + export_results=export_results, + export_details=export_details, + export_convergence=export_convergence, + export_boxplot=export_boxplot, + results_directory=args.results_dir, + enable_parallel=args.parallel, + parallel_backend=args.backend, + num_processes=args.processes + ) + + duration = time.time() - start_time + print(f"Optimization completed in {duration:.2f} seconds") + print(f"Results saved to 
{args.results_dir or 'default output directory'}") + + else: + # Single optimizer and function mode + print(f"Running {args.optimizer} on {args.function}...") + + # Display parallel processing info if enabled + if args.parallel: + print(f"Parallel processing enabled with {args.backend} backend") + if args.processes: + print(f"Using {args.processes} processes") + else: + print("Auto-detecting optimal number of processes") + + start_time = time.time() + + result = run_optimizer( + optimizer=args.optimizer, + objective_func=args.function, + population_size=args.pop_size, + iterations=args.iterations, + dim=args.dim, + lb=lb, + ub=ub, + num_runs=args.runs, + export_results=export_results, + export_details=export_details, + export_convergence=export_convergence, + export_boxplot=export_boxplot, + results_directory=args.results_dir, + enable_parallel=args.parallel, + parallel_backend=args.backend, + num_processes=args.processes + ) + + duration = time.time() - start_time + print(f"Optimization completed in {duration:.2f} seconds") + + if 'best_fitness' in result: + print(f"Best fitness: {result['best_fitness']}") + if 'execution_time' in result: + print(f"Execution time: {result['execution_time']} seconds") + + print(f"Results saved to {args.results_dir or 'default output directory'}") + + +if __name__ == "__main__": + run_cli() \ No newline at end of file diff --git a/EvoloPy/optimizer.py b/EvoloPy/optimizer.py index a9c0eea..9927898 100644 --- a/EvoloPy/optimizer.py +++ b/EvoloPy/optimizer.py @@ -20,6 +20,8 @@ import EvoloPy.optimizers.JAYA as jaya import EvoloPy.optimizers.DE as de from EvoloPy import benchmarks +from EvoloPy.solution import solution +from EvoloPy.parallel_utils import run_optimizer_parallel, detect_hardware import csv import numpy import time @@ -70,7 +72,49 @@ def selector(algo, func_details, popSize, Iter): return x -def run(optimizer, objectivefunc, NumOfRuns, params, export_flags): +def get_optimizer_function(algo): + """ + Return the optimizer 
function for a given algorithm name. + + Parameters: + algo (str): Name of the algorithm + + Returns: + function: The optimizer function + """ + if algo == "SSA": + return ssa.SSA + elif algo == "PSO": + return pso.PSO + elif algo == "GA": + return ga.GA + elif algo == "BAT": + return bat.BAT + elif algo == "FFA": + return ffa.FFA + elif algo == "GWO": + return gwo.GWO + elif algo == "WOA": + return woa.WOA + elif algo == "MVO": + return mvo.MVO + elif algo == "MFO": + return mfo.MFO + elif algo == "CS": + return cs.CS + elif algo == "HHO": + return hho.HHO + elif algo == "SCA": + return sca.SCA + elif algo == "JAYA": + return jaya.JAYA + elif algo == "DE": + return de.DE + else: + return None + + +def run(optimizer, objectivefunc, NumOfRuns, params, export_flags, results_directory=None, enable_parallel=False, parallel_backend='auto', num_processes=None): """ It serves as the main interface of the framework for running the experiments. @@ -93,10 +137,18 @@ def run(optimizer, objectivefunc, NumOfRuns, params, export_flags): 2. Export_details (Exporting the detailed results in files) 3. Export_convergence (Exporting the covergence plots) 4. Export_boxplot (Exporting the box plots) + results_directory : str, optional + Directory to save results (default: timestamp-based directory) + enable_parallel : bool, optional + Whether to enable parallel processing (default: False) + parallel_backend : str, optional + Parallel processing backend ('multiprocessing', 'cuda', 'auto') + num_processes : int, optional + Number of processes to use (None for auto-detection) Returns ----------- - N/A + List of solution objects or a single solution object """ # Select general parameters for all optimizers (population size, number of iterations) .... 
@@ -109,76 +161,254 @@ def run(optimizer, objectivefunc, NumOfRuns, params, export_flags): Export_convergence = export_flags["Export_convergence"] Export_boxplot = export_flags["Export_boxplot"] - Flag = False - Flag_details = False + # Create directory for results + if results_directory is None: + results_directory = time.strftime("%Y-%m-%d-%H-%M-%S") + "/" + + # Ensure directory has trailing slash + if not results_directory.endswith('/'): + results_directory += '/' + + Path(results_directory).mkdir(parents=True, exist_ok=True) + + # Print parallel processing information if enabled + if enable_parallel: + hardware_info = detect_hardware() + print("Parallel processing enabled") + print(f"Backend: {parallel_backend}") + if parallel_backend == 'auto': + if hardware_info['gpu_available']: + print("Auto-selected backend: CUDA GPU") + else: + print("Auto-selected backend: CPU multiprocessing") + + print(f"CPU cores: {hardware_info['cpu_count']}") + if hardware_info['gpu_available']: + print(f"GPUs available: {hardware_info['gpu_count']}") + for i, (name, mem) in enumerate(zip(hardware_info['gpu_names'], hardware_info['gpu_memory'])): + print(f" GPU {i}: {name} ({mem:.2f} GB)") + + if num_processes is None: + from EvoloPy.parallel_utils import get_optimal_process_count + num_processes = get_optimal_process_count(parallel_backend) + print(f"Using {num_processes} parallel processes") + + # Create all_results list to contain the solutions + all_results = [] # CSV Header for for the cinvergence CnvgHeader = [] - results_directory = time.strftime("%Y-%m-%d-%H-%M-%S") + "/" - Path(results_directory).mkdir(parents=True, exist_ok=True) - for l in range(0, Iterations): CnvgHeader.append("Iter" + str(l + 1)) for i in range(0, len(optimizer)): + # Create an optimizer-specific results directory + optimizer_dir = results_directory + optimizer[i] + "/" + Path(optimizer_dir).mkdir(parents=True, exist_ok=True) + for j in range(0, len(objectivefunc)): + # Create a function-specific 
results directory + func_dir = optimizer_dir + objectivefunc[j] + "/" + Path(func_dir).mkdir(parents=True, exist_ok=True) + convergence = [0] * NumOfRuns executionTime = [0] * NumOfRuns - for k in range(0, NumOfRuns): + + # Run optimization process + if enable_parallel and NumOfRuns > 1: + # Get optimizer function + optimizer_func = get_optimizer_function(optimizer[i]) + if optimizer_func is None: + print(f"Unknown optimizer: {optimizer[i]}") + continue + + # Get benchmark function func_details = benchmarks.getFunctionDetails(objectivefunc[j]) - x = selector(optimizer[i], func_details, PopulationSize, Iterations) - convergence[k] = x.convergence - optimizerName = x.optimizer - objfname = x.objfname - if Export_details == True: - ExportToFile = results_directory + "experiment_details.csv" - with open(ExportToFile, "a", newline="\n") as out: - writer = csv.writer(out, delimiter=",") - if ( - Flag_details == False - ): # just one time to write the header of the CSV file - header = numpy.concatenate( - [["Optimizer", "objfname", "ExecutionTime", "Individual"], CnvgHeader] + objf = getattr(benchmarks, func_details[0]) + lb = func_details[1] + ub = func_details[2] + dim = func_details[3] + + # Execute multiple runs in parallel + start_time = time.time() + parallel_results = run_optimizer_parallel( + optimizer_func=optimizer_func, + objf=objf, + lb=lb, + ub=ub, + dim=dim, + PopSize=PopulationSize, + iters=Iterations, + num_runs=NumOfRuns, + parallel_backend=parallel_backend, + num_processes=num_processes + ) + + # Extract results + for k in range(NumOfRuns): + x = parallel_results[k] + convergence[k] = x.convergence + executionTime[k] = x.executionTime + optimizerName = x.optimizer + objfname = x.objfname + + # Export detailed results if needed + if Export_details: + ExportToFile = func_dir + "detailed_results.csv" + with open(ExportToFile, "a", newline="\n") as out: + writer = csv.writer(out, delimiter=",") + if k == 0: # Write header only once + header = 
numpy.concatenate( + [["Run", "Optimizer", "objfname", "ExecutionTime", "Best", "Individual"], CnvgHeader] + ) + writer.writerow(header) + + # Extract best fitness value + best_fitness = getattr(x, 'best_score', None) + if best_fitness is None: + best_fitness = objf(x.bestIndividual) + + a = numpy.concatenate( + [ + [k+1, optimizerName, objfname, x.executionTime, best_fitness, x.bestIndividual], + x.convergence, + ] + ) + writer.writerow(a) + + # Create a solution object to store the best result + best_result_idx = numpy.argmin([objf(x.bestIndividual) for x in parallel_results]) + best_result = parallel_results[best_result_idx] + sol = solution() + sol.optimizer = optimizer[i] + sol.objfname = objectivefunc[j] + sol.best = getattr(benchmarks, func_details[0])(best_result.bestIndividual) + sol.bestIndividual = best_result.bestIndividual + sol.best_score = sol.best + sol.convergence = best_result.convergence + sol.executionTime = sum(executionTime) / NumOfRuns + sol.startTime = time.strftime("%Y-%m-%d-%H-%M-%S") + sol.endTime = time.strftime("%Y-%m-%d-%H-%M-%S") + sol.lb = lb + sol.ub = ub + sol.dim = dim + sol.popnum = PopulationSize + sol.maxiers = Iterations + + all_results.append(sol) + else: + # Sequential execution for each run + all_run_results = [] # To store all run results + + for k in range(0, NumOfRuns): + func_details = benchmarks.getFunctionDetails(objectivefunc[j]) + x = selector(optimizer[i], func_details, PopulationSize, Iterations) + + if x is None: # If the optimizer isn't found + print(f"Error: {optimizer[i]} optimizer is not defined!") + continue + + # Store the result for later use + all_run_results.append(x) + + convergence[k] = x.convergence + executionTime[k] = x.executionTime + + # Export detailed results if needed + if Export_details: + ExportToFile = func_dir + "detailed_results.csv" + with open(ExportToFile, "a", newline="\n") as out: + writer = csv.writer(out, delimiter=",") + if k == 0: # Write header only once + header = 
numpy.concatenate( + [["Run", "Optimizer", "objfname", "ExecutionTime", "Best", "Individual"], CnvgHeader] + ) + writer.writerow(header) + + # Extract best fitness value + best_fitness = getattr(x, 'best_score', None) + if best_fitness is None: + best_fitness = getattr(benchmarks, func_details[0])(x.bestIndividual) + + a = numpy.concatenate( + [ + [k+1, x.optimizer, x.objfname, x.executionTime, best_fitness, x.bestIndividual], + x.convergence, + ] ) - writer.writerow(header) - Flag_details = True # at least one experiment - executionTime[k] = x.executionTime - a = numpy.array([x.optimizer, x.objfname, x.executionTime, x.bestIndividual] + x.convergence.tolist(), dtype=object) - writer.writerow(a) - out.close() + writer.writerow(a) - if Export == True: - ExportToFile = results_directory + "experiment.csv" + # Calculate mean execution time + mean_execution_time = sum(executionTime) / NumOfRuns + # Define objf from func_details + objf = getattr(benchmarks, func_details[0]) + + # Choose the best result from all runs + if all_run_results: + fitness_values = [objf(result.bestIndividual) for result in all_run_results] + best_result_idx = numpy.argmin(fitness_values) + best_result = all_run_results[best_result_idx] + else: + # Fallback if no valid results + best_result = selector(optimizer[i], func_details, PopulationSize, Iterations) + + # Store results in a solution object + sol = solution() + sol.optimizer = optimizer[i] + sol.objfname = objectivefunc[j] + sol.best = getattr(benchmarks, func_details[0])(best_result.bestIndividual) + sol.bestIndividual = best_result.bestIndividual + sol.best_score = sol.best + sol.convergence = best_result.convergence + sol.executionTime = mean_execution_time + sol.startTime = best_result.startTime + sol.endTime = best_result.endTime + sol.lb = func_details[1] + sol.ub = func_details[2] + sol.dim = func_details[3] + sol.popnum = PopulationSize + sol.maxiers = Iterations + + all_results.append(sol) + + # Export average convergence for all 
runs + if Export: + ExportToFile = func_dir + "avg_results.csv" with open(ExportToFile, "a", newline="\n") as out: writer = csv.writer(out, delimiter=",") - if ( - Flag == False - ): # just one time to write the header of the CSV file - header = numpy.concatenate( - [["Optimizer", "objfname", "ExecutionTime"], CnvgHeader] - ) - writer.writerow(header) - Flag = True - - avgExecutionTime = float("%0.2f" % (sum(executionTime) / NumOfRuns)) - avgConvergence = numpy.around( - numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2 - ).tolist() - a = numpy.concatenate([[optimizerName, objfname, avgExecutionTime], avgConvergence]) - writer.writerow(a) - out.close() - - if Export_convergence == True: - plot_convergence.run(results_directory, optimizer, objectivefunc, Iterations) - - if Export_boxplot == True: - plot_boxplot.run(results_directory, optimizer, objectivefunc, Iterations) - - if Flag == False: # Faild to run at least one experiment - print( - "No Optomizer or Cost function is selected. 
Check lists of available optimizers and cost functions" - ) - - print("Execution completed") + avg_convergence = numpy.mean(convergence, axis=0) + std_convergence = numpy.std(convergence, axis=0) + avg_execution_time = numpy.mean(executionTime) + std_execution_time = numpy.std(executionTime) + + # Write header + header = ["Optimizer", "objfname", "ExecutionTime", "StdExecutionTime"] + header.extend([f"Iter{i+1}" for i in range(Iterations)]) + header.extend([f"StdIter{i+1}" for i in range(Iterations)]) + writer.writerow(header) + + # Write data + row_data = [optimizer[i], objectivefunc[j], avg_execution_time, std_execution_time] + row_data.extend(avg_convergence) + row_data.extend(std_convergence) + writer.writerow(row_data) + + # Generate convergence plots + if Export_convergence: + plot_convergence.run(convergence, optimizer[i], objectivefunc[j], func_dir) + + # Generate box plots + if Export_boxplot and NumOfRuns > 1: + plot_boxplot.run(optimizer[i], objectivefunc[j], convergence, func_dir) + + # Print the results + if Export or Export_details: + print("Results saved to:", results_directory) + + # Return all results + if len(all_results) == 1: + return all_results[0] # Return a single solution object if only one optimizer and function + else: + return all_results # Return a list of solution objects diff --git a/EvoloPy/optimizers/GA.py b/EvoloPy/optimizers/GA.py index f42cd7b..dd4bf36 100644 --- a/EvoloPy/optimizers/GA.py +++ b/EvoloPy/optimizers/GA.py @@ -18,7 +18,7 @@ def crossoverPopulaton(population, scores, popSize, crossoverProbability, keep): Parameters ---------- population : list - The list of individuals + The list of individuals scores : list The list of fitness values for each individual popSize: int diff --git a/EvoloPy/optimizers/PSO.py b/EvoloPy/optimizers/PSO.py index 022af69..9cec995 100644 --- a/EvoloPy/optimizers/PSO.py +++ b/EvoloPy/optimizers/PSO.py @@ -5,99 +5,143 @@ @author: Hossam Faris """ -import random -numpy +import numpy as np
from EvoloPy.solution import solution import time - - -def PSO(objf, lb, ub, dim, PopSize, iters): - +from typing import Callable, Union, List + +def PSO(objf: Callable, lb: Union[float, List[float]], ub: Union[float, List[float]], + dim: int, PopSize: int, iters: int) -> solution: + """ + Particle Swarm Optimization (PSO) algorithm + + Parameters + ---------- + objf : callable + The objective function to be minimized + lb : float or list + Lower bounds for decision variables + ub : float or list + Upper bounds for decision variables + dim : int + Problem dimension + PopSize : int + Population size + iters : int + Maximum number of iterations + + Returns + ------- + s : solution + Solution containing optimization results + """ # PSO parameters - - Vmax = 6 + Vmax = 6.0 wMax = 0.9 wMin = 0.2 - c1 = 2 - c2 = 2 + c1 = 2.0 + c2 = 2.0 s = solution() - if not isinstance(lb, list): + + # Convert bounds to arrays if they're scalar + if not isinstance(lb, list) and not isinstance(lb, np.ndarray): lb = [lb] * dim - if not isinstance(ub, list): + if not isinstance(ub, list) and not isinstance(ub, np.ndarray): ub = [ub] * dim - - ######################## Initializations - - vel = numpy.zeros((PopSize, dim)) - - pBestScore = numpy.zeros(PopSize) - pBestScore.fill(float("inf")) - - pBest = numpy.zeros((PopSize, dim)) - gBest = numpy.zeros(dim) - + + lb = np.array(lb) + ub = np.array(ub) + + # Ensure bounds are the right shape + if len(lb) != dim: + lb = np.array([lb[0]] * dim) + if len(ub) != dim: + ub = np.array([ub[0]] * dim) + + # Initialize positions and velocities + pos = np.zeros((PopSize, dim)) + vel = np.zeros((PopSize, dim)) + + # Initialize personal best positions and scores + pBestScore = np.full(PopSize, float("inf")) + pBest = np.zeros((PopSize, dim)) + + # Initialize global best gBestScore = float("inf") - - pos = numpy.zeros((PopSize, dim)) - for i in range(dim): - pos[:, i] = numpy.random.uniform(0, 1, PopSize) * (ub[i] - lb[i]) + lb[i] - - convergence_curve = 
numpy.zeros(iters) - - ############################################ + gBest = np.zeros(dim) + + # Initialize positions randomly within bounds + pos = np.random.uniform(0, 1, (PopSize, dim)) * (ub - lb) + lb + + # Initialize convergence tracking + convergence_curve = np.zeros(iters) + print('PSO is optimizing "' + objf.__name__ + '"') - + + # Start timing timerStart = time.time() s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S") - - for l in range(0, iters): - for i in range(0, PopSize): - # pos[i,:]=checkBounds(pos[i,:],lb,ub) - for j in range(dim): - pos[i, j] = numpy.clip(pos[i, j], lb[j], ub[j]) - # Calculate objective function for each particle - fitness = objf(pos[i, :]) - - if pBestScore[i] > fitness: + + # Main loop + for l in range(iters): + # Update inertia weight + w = wMax - l * ((wMax - wMin) / iters) + + # For each particle + for i in range(PopSize): + # Clip position to bounds + pos[i] = np.clip(pos[i], lb, ub) + + # Evaluate fitness + fitness = objf(pos[i]) + + # Update personal best + if fitness < pBestScore[i]: pBestScore[i] = fitness - pBest[i, :] = pos[i, :].copy() - - if gBestScore > fitness: + pBest[i] = pos[i].copy() + + # Update global best + if fitness < gBestScore: gBestScore = fitness - gBest = pos[i, :].copy() - - # Update the W of PSO - w = wMax - l * ((wMax - wMin) / iters) - - for i in range(0, PopSize): - for j in range(0, dim): - r1 = random.random() - r2 = random.random() - vel[i, j] = ( - w * vel[i, j] - + c1 * r1 * (pBest[i, j] - pos[i, j]) - + c2 * r2 * (gBest[j] - pos[i, j]) - ) - - if vel[i, j] > Vmax: - vel[i, j] = Vmax - - if vel[i, j] < -Vmax: - vel[i, j] = -Vmax - - pos[i, j] = pos[i, j] + vel[i, j] - + gBest = pos[i].copy() + + # Update velocities and positions for all particles + r1 = np.random.random((PopSize, dim)) + r2 = np.random.random((PopSize, dim)) + + # Calculate new velocities + vel = (w * vel + + c1 * r1 * (pBest - pos) + + c2 * r2 * (gBest - pos)) + + # Clip velocities + vel = np.clip(vel, -Vmax, Vmax) + + # 
Update positions + pos = pos + vel + + # Record best fitness convergence_curve[l] = gBestScore - + if l % 1 == 0: print(["At iteration " + str(l + 1) + " the best fitness is " + str(gBestScore)]) + + # End timing timerEnd = time.time() s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S") s.executionTime = timerEnd - timerStart + + # Record results s.convergence = convergence_curve s.optimizer = "PSO" s.bestIndividual = gBest + s.best_score = gBestScore # Store the best score in the solution object s.objfname = objf.__name__ - + s.lb = lb + s.ub = ub + s.dim = dim + s.popnum = PopSize + s.maxiers = iters + return s diff --git a/EvoloPy/optimizers/__init__.py b/EvoloPy/optimizers/__init__.py new file mode 100644 index 0000000..99f2100 --- /dev/null +++ b/EvoloPy/optimizers/__init__.py @@ -0,0 +1,45 @@ +""" +Nature-inspired optimization algorithms implemented in EvoloPy. + +This module provides direct access to all optimization algorithms: + +Examples: + >>> from EvoloPy.optimizers import PSO, GWO + >>> result_pso = PSO.PSO(objective_function, lb=-10, ub=10, dim=5, PopSize=30, iters=50) + >>> result_gwo = GWO.GWO(objective_function, lb=-10, ub=10, dim=5, PopSize=30, iters=50) + + # Or if you want to dynamically select an optimizer: + >>> from EvoloPy.optimizers import optimizer_map + >>> optimizer_func = optimizer_map["PSO"] + >>> result = optimizer_func(objective_function, lb=-10, ub=10, dim=5, PopSize=30, iters=50) +""" + +import importlib +from typing import Dict, Callable, Any + +# Import each optimizer module dynamically +optimizer_modules = [ + "PSO", "GWO", "MVO", "MFO", "CS", "BAT", + "WOA", "FFA", "SSA", "GA", "HHO", "SCA", + "JAYA", "DE" +] + +# Dictionary mapping algorithm names to their functions +optimizer_map = {} + +# Dynamically import each optimizer module +for name in optimizer_modules: + try: + module = importlib.import_module(f"EvoloPy.optimizers.{name}") + # Make the module available for direct import + globals()[name] = module + # Get the optimizer 
function from the module + optimizer_function = getattr(module, name) + # Add it to the optimizer map + optimizer_map[name] = optimizer_function + except (ImportError, AttributeError): + # Skip optimizers that aren't available + pass + +# List of all available optimizers for import +__all__ = list(optimizer_map.keys()) + ["optimizer_map"] \ No newline at end of file diff --git a/EvoloPy/parallel_utils.py b/EvoloPy/parallel_utils.py new file mode 100644 index 0000000..90aa4aa --- /dev/null +++ b/EvoloPy/parallel_utils.py @@ -0,0 +1,235 @@ +""" +Parallel processing utilities for EvoloPy. + +This module provides functions for parallel execution of optimization algorithms, +hardware detection, and optimal configuration for parallel processing. +""" + +import os +import numpy as np +import multiprocessing +from typing import Callable, Dict, List, Union, Tuple, Any +import concurrent.futures +import platform +import psutil + +try: + import torch + TORCH_AVAILABLE = True +except ImportError: + TORCH_AVAILABLE = False + +try: + import cupy + CUPY_AVAILABLE = True +except ImportError: + CUPY_AVAILABLE = False + + +def detect_hardware() -> Dict[str, Any]: + """ + Detect available hardware resources for parallel processing. 
+ + Returns: + Dict[str, Any]: Dictionary containing hardware information: + - cpu_count: Number of CPU cores + - cpu_threads: Number of CPU threads + - ram_gb: Available RAM in GB + - gpu_available: Whether CUDA GPU is available + - gpu_count: Number of CUDA GPUs + - gpu_names: List of GPU names + - gpu_memory: List of GPU memory in GB + """ + # CPU detection + cpu_count = psutil.cpu_count(logical=False) + cpu_threads = psutil.cpu_count(logical=True) + + # Memory detection + ram_gb = psutil.virtual_memory().total / (1024**3) + + # GPU detection + gpu_available = False + gpu_count = 0 + gpu_names = [] + gpu_memory = [] + + # Check for CUDA GPUs via PyTorch + if TORCH_AVAILABLE: + gpu_available = torch.cuda.is_available() + if gpu_available: + gpu_count = torch.cuda.device_count() + for i in range(gpu_count): + gpu_names.append(torch.cuda.get_device_name(i)) + gpu_memory.append(torch.cuda.get_device_properties(i).total_memory / (1024**3)) + + return { + 'cpu_count': cpu_count, + 'cpu_threads': cpu_threads, + 'ram_gb': ram_gb, + 'gpu_available': gpu_available, + 'gpu_count': gpu_count, + 'gpu_names': gpu_names, + 'gpu_memory': gpu_memory + } + + +def get_optimal_process_count(parallel_backend: str = 'multiprocessing') -> int: + """ + Determine the optimal number of processes for parallel execution. 
+ + Parameters: + parallel_backend (str): Parallel processing backend ('multiprocessing', 'cuda', 'auto') + + Returns: + int: Optimal number of processes + """ + # If GPU backend is requested but not available, fall back to CPU + if parallel_backend == 'cuda' and not (TORCH_AVAILABLE and torch.cuda.is_available()): + parallel_backend = 'multiprocessing' + + # If auto is specified, use GPU if available, otherwise CPU + if parallel_backend == 'auto': + if TORCH_AVAILABLE and torch.cuda.is_available(): + parallel_backend = 'cuda' + else: + parallel_backend = 'multiprocessing' + + # For CUDA, use the number of GPUs (or 1 if multiple streams not supported) + if parallel_backend == 'cuda': + return torch.cuda.device_count() + + # For CPU multiprocessing, leave one core free for system processes + cpu_count = psutil.cpu_count(logical=False) + if cpu_count is None: + cpu_count = os.cpu_count() or 2 + + # Use n-1 cores, but at least 1 + return max(1, cpu_count - 1) + + +def run_optimizer_parallel( + optimizer_func: Callable, + objf: Callable, + lb: Union[float, List[float]], + ub: Union[float, List[float]], + dim: int, + PopSize: int, + iters: int, + num_runs: int, + parallel_backend: str = 'multiprocessing', + num_processes: int = None +) -> List[Any]: + """ + Run an optimizer multiple times in parallel. 
+ + Parameters: + optimizer_func (Callable): The optimizer function to run + objf (Callable): The objective function to optimize + lb (float or List[float]): Lower bounds + ub (float or List[float]): Upper bounds + dim (int): Problem dimension + PopSize (int): Population size + iters (int): Number of iterations + num_runs (int): Number of independent runs + parallel_backend (str): Parallel processing backend ('multiprocessing', 'cuda', 'auto') + num_processes (int, optional): Number of processes to use (None for auto) + + Returns: + List[Any]: List of solution objects from each run + """ + if num_processes is None: + num_processes = get_optimal_process_count(parallel_backend) + + # For GPU backend + if parallel_backend == 'cuda' and TORCH_AVAILABLE and torch.cuda.is_available(): + # Create a function to run on a specific GPU + def run_on_gpu(run_id, gpu_id): + # Set the CUDA device + torch.cuda.set_device(gpu_id % torch.cuda.device_count()) + # Run the optimizer + return optimizer_func(objf, lb, ub, dim, PopSize, iters) + + results = [] + with concurrent.futures.ThreadPoolExecutor(max_workers=num_processes) as executor: + futures = [] + for i in range(num_runs): + futures.append(executor.submit(run_on_gpu, i, i % num_processes)) + + for future in concurrent.futures.as_completed(futures): + results.append(future.result()) + + return results + + # For CPU multiprocessing backend + elif parallel_backend in ['multiprocessing', 'auto']: + # Create a function that just executes the optimizer with fixed parameters + def run_optimizer_once(run_id): + return optimizer_func(objf, lb, ub, dim, PopSize, iters) + + results = [] + with concurrent.futures.ProcessPoolExecutor(max_workers=num_processes) as executor: + futures = [] + for i in range(num_runs): + futures.append(executor.submit(run_optimizer_once, i)) + + for future in concurrent.futures.as_completed(futures): + results.append(future.result()) + + return results + + else: + raise ValueError(f"Unknown parallel backend: 
{parallel_backend}") + + +def run_population_fitness_parallel( + objf: Callable, + population: np.ndarray, + parallel_backend: str = 'multiprocessing', + num_processes: int = None +) -> np.ndarray: + """ + Evaluate fitness of a population in parallel. + + Parameters: + objf (Callable): The objective function + population (np.ndarray): Population matrix where each row is an individual + parallel_backend (str): Parallel processing backend ('multiprocessing', 'cuda', 'auto') + num_processes (int, optional): Number of processes to use (None for auto) + + Returns: + np.ndarray: Array of fitness values + """ + if num_processes is None: + num_processes = get_optimal_process_count(parallel_backend) + + # For GPU backend with PyTorch + if parallel_backend == 'cuda' and TORCH_AVAILABLE and torch.cuda.is_available(): + # Convert population to PyTorch tensor + device = torch.device("cuda") + population_tensor = torch.tensor(population, device=device, dtype=torch.float32) + + # Vectorized implementation if possible + try: + # Try vectorized evaluation (if objf supports tensor input) + fitness = objf(population_tensor) + return fitness.cpu().numpy() + except: + # Fall back to iterative evaluation + fitness = torch.zeros(len(population), device=device) + for i in range(len(population)): + fitness[i] = objf(population_tensor[i]) + return fitness.cpu().numpy() + + # For CPU multiprocessing backend + elif parallel_backend in ['multiprocessing', 'auto']: + # Create a worker function to evaluate individual fitness + def evaluate_individual(individual): + return objf(individual) + + with concurrent.futures.ProcessPoolExecutor(max_workers=num_processes) as executor: + fitness = list(executor.map(evaluate_individual, population)) + + return np.array(fitness) + + else: + raise ValueError(f"Unknown parallel backend: {parallel_backend}") \ No newline at end of file diff --git a/EvoloPy/plot_boxplot.py b/EvoloPy/plot_boxplot.py index acbf000..b53dbbe 100644 --- a/EvoloPy/plot_boxplot.py +++ 
def run(optimizer_name, objective_func, convergence, directory=""):
    """
    Draw a boxplot of the final fitness value reached in each run and save
    it as a PNG file.

    Parameters:
        optimizer_name: Name of the optimizer.
        objective_func: Name of the objective function.
        convergence: One convergence curve (sequence of fitness values) per run.
        directory: Output directory, expected to end with a path separator.

    Returns:
        The path of the saved PNG file.
    """
    # Make sure the target directory exists before plotting.
    Path(directory).mkdir(parents=True, exist_ok=True)

    plt.figure()

    # The last entry of each convergence curve is that run's final fitness.
    last_values = [curve[-1] for curve in convergence]

    artists = plt.boxplot(last_values, patch_artist=True)

    # Styling of the box, median line, whiskers and outlier markers.
    plt.setp(artists['boxes'], color='blue', facecolor='lightblue')
    plt.setp(artists['medians'], color='red')
    plt.setp(artists['whiskers'], color='black')
    plt.setp(artists['fliers'], marker='o', markerfacecolor='red', markersize=6)

    plt.ylabel('Final Fitness Value')
    plt.title(f'Boxplot - {optimizer_name} on {objective_func}')
    # A single box needs no x tick marks or labels.
    plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)

    # Summary statistics rendered as a small text box on the figure.
    summary = {
        'Min': np.min(last_values),
        'Max': np.max(last_values),
        'Mean': np.mean(last_values),
        'Median': np.median(last_values),
        'Std': np.std(last_values),
    }
    summary_text = '\n'.join(f'{name}: {value:.6g}' for name, value in summary.items())
    plt.figtext(0.65, 0.7, summary_text, fontsize=9,
                bbox=dict(facecolor='lightgray', alpha=0.5))

    out_path = f"{directory}{optimizer_name}_{objective_func}_boxplot.png"
    plt.savefig(out_path, bbox_inches='tight')
    plt.close()

    return out_path


def generateBoxPlot(optimizers, objectives, fitness_values, directory=""):
    """
    Draw one comparison boxplot per objective function, comparing the
    final fitness distributions of several optimizers.

    Parameters:
        optimizers: List of optimizer names.
        objectives: List of objective function names.
        fitness_values: List of fitness-value lists, one entry per optimizer.
        directory: Output directory, expected to end with a path separator.

    Returns:
        The output directory.
    """
    # Make sure the target directory exists before plotting.
    Path(directory).mkdir(parents=True, exist_ok=True)

    # One fill colour per optimizer, cycled in order.
    palette = ['lightblue', 'lightgreen', 'lightpink', 'lightyellow',
               'lightcyan', 'lightsalmon', 'lightcoral', 'lightgray']

    for obj in objectives:
        plt.figure(figsize=(10, 6))

        # NOTE(review): fitness_values is indexed by optimizer only, so every
        # objective reuses the same per-optimizer data — confirm with callers
        # whether a per-objective index was intended here.
        series = []
        tick_names = []
        for opt_index, opt in enumerate(optimizers):
            if isinstance(fitness_values[opt_index], list):
                series.append(fitness_values[opt_index])
                tick_names.append(opt)

        # Only draw when at least one optimizer supplied data.
        if series:
            artists = plt.boxplot(series, patch_artist=True, labels=tick_names)

            for patch, shade in zip(artists['boxes'], palette[:len(series)]):
                patch.set(color='blue', facecolor=shade)

            plt.setp(artists['medians'], color='red')
            plt.setp(artists['whiskers'], color='black')
            plt.setp(artists['fliers'], marker='o', markerfacecolor='red', markersize=4)

            plt.ylabel('Final Fitness Value')
            plt.title(f'Algorithm Comparison on {obj}')
            plt.xticks(rotation=45)
            plt.grid(axis='y', linestyle='--', alpha=0.7)

            out_path = f"{directory}comparison_{obj}_boxplot.png"
            plt.savefig(out_path, bbox_inches='tight')
            plt.close()

    return directory
Path - plt.xlabel("Iterations") - plt.ylabel("Fitness") - plt.legend(loc="upper right", bbox_to_anchor=(1.2, 1.02)) - plt.grid() - fig_name = results_directory + "/convergence-" + objective_name + ".png" - plt.savefig(fig_name, bbox_inches="tight") - plt.clf() - # plt.show() +def run(convergence, optimizer_name, objective_func, directory=""): + """ + Generate and save convergence plots for an optimizer. + + Parameters: + convergence: List of convergence values for each run + optimizer_name: Name of the optimizer + objective_func: Name of the objective function + directory: Directory to save the plot (with trailing slash) + """ + # Ensure the directory exists + Path(directory).mkdir(parents=True, exist_ok=True) + + # Create figure + plt.figure() + + # If we have multiple runs, calculate mean and plot with confidence intervals + if isinstance(convergence, list) and len(convergence) > 1: + # Convert list of lists to numpy array for easier manipulation + convergence_arr = np.array(convergence) + + # Calculate mean and std of convergence across runs + mean_convergence = np.mean(convergence_arr, axis=0) + std_convergence = np.std(convergence_arr, axis=0) + + # Plot mean convergence + x = np.arange(1, len(mean_convergence) + 1) + plt.plot(x, mean_convergence, 'b-', label=f'{optimizer_name} mean') + + # Add confidence interval (mean Β± std) + plt.fill_between(x, mean_convergence - std_convergence, + mean_convergence + std_convergence, + color='b', alpha=0.2, label='Standard Deviation') + else: + # If single run, plot the convergence directly + if isinstance(convergence, list) and len(convergence) == 1: + convergence = convergence[0] + + x = np.arange(1, len(convergence) + 1) + plt.plot(x, convergence, 'b-', label=optimizer_name) + + # Add labels and title + plt.xlabel('Iterations') + plt.ylabel('Fitness') + plt.title(f'Convergence - {optimizer_name} on {objective_func}') + plt.legend() + + # Set log scale for y-axis if values are all positive and vary by orders of 
magnitude + if np.all(np.array(convergence) > 0) and np.max(convergence) / np.min(convergence) > 100: + plt.yscale('log') + + # Save the figure + filename = f"{directory}{optimizer_name}_{objective_func}_convergence.png" + plt.savefig(filename, bbox_inches='tight') + plt.close() + + return filename diff --git a/EvoloPy/solution.py b/EvoloPy/solution.py index 3e4cccf..2b36741 100644 --- a/EvoloPy/solution.py +++ b/EvoloPy/solution.py @@ -10,6 +10,7 @@ class solution: def __init__(self): self.best = 0 self.bestIndividual = [] + self.best_score = 0 self.convergence = [] self.optimizer = "" self.objfname = "" diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..01e5dbb --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,5 @@ +include LICENSE.txt +include README.md +include requirements.txt +recursive-include examples *.py *.ipynb +recursive-include tests *.py \ No newline at end of file diff --git a/README.md b/README.md index bb391e0..926c73a 100644 --- a/README.md +++ b/README.md @@ -2,114 +2,567 @@ EvoloPy-logo -# EvoloPy: An open source nature-inspired optimization toolbox for global optimization in Python +# EvoloPy: Nature-Inspired Optimization in Python -The EvoloPy toolbox provides classical and recent nature-inspired metaheuristic for the global optimization. The list of optimizers that have been implemented includes Particle Swarm Optimization (PSO), Multi-Verse Optimizer (MVO), Grey Wolf Optimizer (GWO), and Moth Flame Optimization (MFO). 
The full list of implemented optimizers is available here https://github.com/7ossam81/EvoloPy/wiki/List-of-optimizers +[![PyPI version](https://badge.fury.io/py/EvoloPy.svg)](https://badge.fury.io/py/EvoloPy) +[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) +[![Python 3.6+](https://img.shields.io/badge/python-3.6+-blue.svg)](https://www.python.org/downloads/release/python-360/) -If you like our framework then we would really appreciate **a Star ⭐!** +EvoloPy is a powerful, easy-to-use optimization library featuring 14 nature-inspired algorithms, performance visualizations, and parallel processing capabilities. Version 4.0 introduces enhanced output organization, better bounds handling, and improved result reporting. +## ✨ Features -## Features -- Fourteen nature-inspired metaheuristic optimizers were implemented. -- The implementation uses the fast array manipulation using `NumPy`. -- Matrix support using `SciPy`'s package. -- More optimizers is coming soon. +- πŸ” **14 Optimizers**: PSO, GWO, MVO, and more +- πŸš€ **Parallel Processing**: Multi-core CPU and GPU acceleration (v3.0+) +- πŸ“Š **Visualization Tools**: Convergence curves and performance comparisons +- πŸ”§ **Simple API**: Consistent interface across all algorithms +- πŸ“‹ **24 Benchmark Functions**: Extensive testing suite +- πŸ“ **Organized Results**: Structured output folders for easy analysis (v4.0+) -## Installation -- Python 3.6 or higher is required. +## πŸ“¦ Installation -Run +### Basic Installation +```bash +pip install EvoloPy +``` - pip install -r requirements.txt +### With GPU Acceleration +```bash +pip install EvoloPy[gpu] +``` -(possibly with `sudo`) +## πŸš€ Quick Start Guide + +### 1. 
Simple Optimization + +```python +from EvoloPy.api import run_optimizer + +# Run PSO on benchmark function F1 +result = run_optimizer( + optimizer="PSO", + objective_func="F1", + dim=30, + lb=-100, # Lower bounds (scalar or list) + ub=100, # Upper bounds (scalar or list) + population_size=50, + iterations=100, + num_runs=5, # Number of independent runs + results_directory=None # Auto-create timestamped directory +) + +print(f"Best fitness: {result['best_fitness']}") +print(f"Best solution: {result['best_solution']}") +print(f"Execution time: {result['execution_time']} seconds") +``` -This command will install `sklearn`, `NumPy`, `SciPy`, and other dependencies for you. +### 2. Jupyter Notebook Visualization (v4.0.5+) + +```python +from EvoloPy.api import run_optimizer, run_multiple_optimizers +import matplotlib.pyplot as plt + +# Compare multiple optimizers with visualization +results = run_multiple_optimizers( + optimizers=["PSO", "GWO", "MVO"], + objective_funcs=["F1", "F5"], + dim=10, + lb=-100, + ub=100, + population_size=30, + iterations=50, + num_runs=3, + display_plots=True # Enable interactive plots in notebook +) + +# Display convergence comparison plot +plt.figure(results['plots']['convergence_F1']) +plt.show() + +# Display performance summary across functions +plt.figure(results['plots']['performance_summary']) +plt.show() + +# Single optimizer with visualization +result = run_optimizer( + optimizer="PSO", + objective_func="F1", + dim=10, + population_size=30, + iterations=50, + num_runs=5, + display_plots=True # Enable interactive plots +) + +# Display convergence plot +plt.figure(result['plots']['avg_convergence']) +plt.show() + +# Display boxplot of multiple runs +plt.figure(result['plots']['boxplot']) +plt.show() +``` -- **For Windows**: Please install Anaconda from [here](https://www.continuum.io/downloads), which is the leading open data science platform powered by Python. 
-- **For Ubuntu or Debian (Python 3)**: - - sudo apt-get install python3-numpy python3-scipy liblapack-dev libatlas-base-dev libgsl0-dev fftw-dev libglpk-dev libdsdp-dev +### 3. Optimize Your Custom Function + +```python +import numpy as np +from EvoloPy.optimizers import PSO + +# Define your custom objective function +def my_equation(x): + # Example: Minimize f(x) = sum(x^2) + sum(sin(x)) + return np.sum(x**2) + np.sum(np.sin(x)) + +# Run optimization on your function +result = PSO.PSO( + objf=my_equation, # Your custom function + lb=[-10] * 5, # Lower bounds as list (v4.0+ supports lists) + ub=[10] * 5, # Upper bounds as list + dim=5, # Dimension + PopSize=30, # Population size + iters=100 # Max iterations +) + +# Get results +best_solution = result.bestIndividual +best_fitness = result.best_score # v4.0+ exposes best_score directly + +print(f"Best solution: {best_solution}") +print(f"Best fitness: {best_fitness}") +``` -## Get the source +### 4. Parallel Processing (v3.0+) + +```python +from EvoloPy.api import run_optimizer, get_hardware_info + +# Check available hardware +hw_info = get_hardware_info() +print(f"CPU cores: {hw_info['cpu_count']}") +if hw_info['gpu_available']: + print(f"GPU: {hw_info['gpu_names'][0]}") + +# Run with parallel processing +result = run_optimizer( + optimizer="PSO", + objective_func="F1", + dim=30, + lb=-100, + ub=100, + population_size=50, + iterations=100, + num_runs=10, # Number of independent runs + enable_parallel=True, # Enable parallel processing + parallel_backend="auto" # Auto-select CPU or GPU +) +``` -Clone the Git repository from GitHub +### 5. 
Compare Multiple Optimizers + +```python +from EvoloPy.api import run_multiple_optimizers + +# Compare PSO, GWO and MVO on F1 and F5 +results = run_multiple_optimizers( + optimizers=["PSO", "GWO", "MVO"], + objective_funcs=["F1", "F5"], + dim=30, + lb=-100, # v4.0+ supports list bounds + ub=100, + population_size=50, + iterations=100, + num_runs=5, # Multiple runs for statistical significance + export_convergence=True, # Generate convergence plots + export_boxplot=True # Generate boxplots for multiple runs +) +``` - git clone https://github.com/7ossam81/EvoloPy.git +### 6. Command Line Usage +```bash +# List available optimizers and benchmarks +evolopy --list -## Quick User Guide +# Run PSO on F1 (v4.0+ syntax) +evolopy --optimizer PSO --function F1 --dim 30 --iterations 100 --runs 5 -EvoloPy toolbox contains twenty three benchmarks (F1-F24). The main file is the optimizer.py, which considered the interface of the toolbox. In the optimizer.py you can setup your experiment by selecting the optimizers, the benchmarks, number of runs, number of iterations, and population size. -The following is a sample example to use the EvoloPy toolbox. -Select optimizers from the list of available ones: "SSA","PSO","GA","BAT","FFA","GWO","WOA","MVO","MFO","CS","HHO","SCA","JAYA","DE". For example: -``` -optimizer=["SSA","PSO","GA"] -``` +# Run with parallel processing and list bounds +evolopy --optimizer PSO --function F1 --dim 30 --iterations 100 --lb "-10,-10,-10" --ub "10,10,10" --parallel -After that, Select benchmark function from the list of available ones: "F1","F2","F3","F4","F5","F6","F7","F8","F9","F10","F11","F12","F13","F14","F15","F16","F17","F18","F19". For example: -``` -objectivefunc=["F3","F4"] +# Disable certain exports +evolopy --optimizer PSO --function F1 --no-export-boxplot --no-export-details ``` -Select the number of repetitions for each experiment. 
To obtain meaningful statistical results, usually 30 independent runs are executed for each algorithm. For example: +## πŸ“‹ Available Optimizers + +| Abbreviation | Algorithm Name | +|--------------|----------------------------------| +| PSO | Particle Swarm Optimization | +| GWO | Grey Wolf Optimizer | +| MVO | Multi-Verse Optimizer | +| MFO | Moth Flame Optimization | +| CS | Cuckoo Search | +| BAT | Bat Algorithm | +| WOA | Whale Optimization Algorithm | +| FFA | Firefly Algorithm | +| SSA | Salp Swarm Algorithm | +| GA | Genetic Algorithm | +| HHO | Harris Hawks Optimization | +| SCA | Sine Cosine Algorithm | +| JAYA | JAYA Algorithm | +| DE | Differential Evolution | + +## 🧠 Optimization Algorithms in Detail + +### PSO (Particle Swarm Optimization) +- **Inspiration**: Social behavior of bird flocking or fish schooling +- **Year**: 1995 +- **Developers**: Kennedy and Eberhart +- **Key Parameters**: + - Inertia weight (w): Controls momentum of particles + - Cognitive coefficient (c1): Personal influence factor + - Social coefficient (c2): Social influence factor + - Maximum velocity (Vmax): Limits velocity to prevent explosion +- **Process**: Particles move through search space, remembering their own best positions and global best position. Velocity is updated using personal and social components. +- **Strengths**: Easy implementation, few parameters, fast convergence on unimodal functions +- **Ideal For**: Continuous optimization problems, especially when gradient information is unavailable + +### GWO (Grey Wolf Optimizer) +- **Inspiration**: Social hierarchy and hunting behavior of grey wolves +- **Year**: 2014 +- **Developer**: Mirjalili +- **Key Parameters**: + - Alpha, beta, and delta wolves: Three best solutions + - Parameter a: Decreases linearly from 2 to 0 +- **Process**: Solutions are ranked as alpha, beta, delta, and omega wolves. Omegas update their positions based on alpha, beta, and delta positions. 
+- **Strengths**: Balanced exploration and exploitation, avoids local optima well +- **Ideal For**: Complex multimodal optimization problems + +### MVO (Multi-Verse Optimizer) +- **Inspiration**: Theory of multi-verse in physics +- **Year**: 2016 +- **Developers**: Mirjalili, Mirjalili, and Hatamlou +- **Key Parameters**: + - Wormhole Existence Probability (WEP): Controls exploration/exploitation + - Traveling Distance Rate (TDR): Controls teleportation distance +- **Process**: Uses concepts of white holes, black holes, and wormholes to create "universes" that can exchange information +- **Strengths**: Strong global exploration, good at escaping local optima +- **Ideal For**: Complex multimodal problems with many local optima + +### MFO (Moth Flame Optimization) +- **Inspiration**: Navigation behavior of moths in nature (flying toward light sources) +- **Year**: 2015 +- **Developer**: Mirjalili +- **Key Parameters**: + - Number of flames: Controls exploration/exploitation balance + - Parameter b: Defines the shape of the spiral +- **Process**: Moths fly around flames in spiral pattern, with flames representing best solutions +- **Strengths**: Good balance between exploration and exploitation +- **Ideal For**: Problems requiring precision in local search + +### CS (Cuckoo Search) +- **Inspiration**: Brood parasitism behavior of cuckoo birds +- **Year**: 2009 +- **Developers**: Yang and Deb +- **Key Parameters**: + - Discovery probability (pa): Controls fraction of worse solutions to be abandoned + - LΓ©vy flight parameters: Controls the random walk characteristics +- **Process**: Uses LΓ©vy flights for exploration and host nest switching for exploitation +- **Strengths**: Effective at exploring large spaces and converging to global optimum +- **Ideal For**: Continuous nonlinear optimization problems + +### BAT (Bat Algorithm) +- **Inspiration**: Echolocation behavior of microbats +- **Year**: 2010 +- **Developer**: Yang +- **Key Parameters**: + - Pulse rate: 
Controls exploitation + - Loudness: Controls exploration + - Frequency range: Controls step size +- **Process**: Bats use echolocation to sense distance and prey, adjusting flight patterns based on loudness and pulse rate +- **Strengths**: Balance between exploration and exploitation, adaptive parameters +- **Ideal For**: Continuous optimization problems with constraints + +### WOA (Whale Optimization Algorithm) +- **Inspiration**: Hunting behavior of humpback whales, particularly bubble-net feeding +- **Year**: 2016 +- **Developer**: Mirjalili and Lewis +- **Key Parameters**: + - Parameter a: Decreases linearly from 2 to 0 + - Parameter b: Defines spiral shape +- **Process**: Uses encircling prey, bubble-net attack (exploitation), and search for prey (exploration) +- **Strengths**: Good balance between exploration and exploitation phases +- **Ideal For**: Highly nonlinear optimization problems + +### FFA (Firefly Algorithm) +- **Inspiration**: Flashing behavior of fireflies +- **Year**: 2008 +- **Developer**: Yang +- **Key Parameters**: + - Light absorption coefficient: Controls visibility + - Attractiveness: Defines the strength of attraction + - Randomization parameter: Controls random movement +- **Process**: Fireflies are attracted to each other based on brightness, which relates to objective function +- **Strengths**: Good at dealing with multimodal problems, automatic subdivision of population +- **Ideal For**: Multimodal problems and problems requiring multi-swarm approach + +### SSA (Salp Swarm Algorithm) +- **Inspiration**: Swarming behavior of salps in deep oceans +- **Year**: 2017 +- **Developer**: Mirjalili +- **Key Parameters**: + - Parameter c1: Balances exploration and exploitation +- **Process**: Salps form chains with leader salp guiding the movement, and followers using chain formation rules +- **Strengths**: Simple implementation, good exploration capability +- **Ideal For**: Problems requiring both exploration and quick convergence + +### GA 
(Genetic Algorithm) +- **Inspiration**: Natural selection and genetics +- **Year**: 1975 +- **Developer**: Holland +- **Key Parameters**: + - Crossover rate: Controls rate of information exchange + - Mutation rate: Controls rate of random changes + - Selection pressure: Controls selective pressure toward better solutions +- **Process**: Uses selection, crossover, and mutation to evolve population of solutions +- **Strengths**: Robust performance, well-suited for combinatorial problems +- **Ideal For**: Discrete optimization, combinatorial problems, complex landscapes + +### HHO (Harris Hawks Optimization) +- **Inspiration**: Cooperative hunting behavior of Harris hawks +- **Year**: 2019 +- **Developers**: Heidari et al. +- **Key Parameters**: + - Energy of the rabbit (E): Controls transition between exploration/exploitation + - Jump strength (J): Controls escaping behavior of prey +- **Process**: Hawks perform surprise pounce, varied dives, and team encircling to catch prey +- **Strengths**: Strong exploration capabilities, adaptive behavior +- **Ideal For**: Global optimization problems requiring high exploration + +### SCA (Sine Cosine Algorithm) +- **Inspiration**: Mathematical sine and cosine functions +- **Year**: 2016 +- **Developer**: Mirjalili +- **Key Parameters**: + - Parameter r1: Controls search direction + - Parameter r2: Controls search distance + - Parameter r3: Adds randomness + - Parameter r4: Controls switching between sine/cosine +- **Process**: Fluctuates solutions using sine and cosine functions to converge toward best solution +- **Strengths**: Mathematical foundation, balanced exploration/exploitation +- **Ideal For**: Problems with smooth objective functions + +### JAYA (JAYA Algorithm) +- **Inspiration**: Sanskrit word meaning "victory" +- **Year**: 2016 +- **Developer**: Rao +- **Key Parameters**: + - No algorithm-specific parameters (parameter-less) +- **Process**: Solutions move toward best solution and away from worst solution +- 
**Strengths**: Simple implementation, no control parameters, fast convergence +- **Ideal For**: Constraint optimization problems, problems requiring minimal tuning + +### DE (Differential Evolution) +- **Inspiration**: Evolutionary process with vector differences +- **Year**: 1997 +- **Developers**: Storn and Price +- **Key Parameters**: + - Crossover rate (CR): Controls rate of crossover + - Differential weight (F): Controls amplitude of difference vectors + - Population size: Controls diversity +- **Process**: Creates new candidate solutions by combining existing ones with weighted differences +- **Strengths**: Effective for continuous function optimization, robust to noise +- **Ideal For**: Continuous nonlinear, non-differentiable, multimodal optimization problems + +## πŸ› οΈ How to Optimize Your Own Function + +### Simple Custom Functions + +```python +import numpy as np +from EvoloPy.optimizers import PSO + +# Step 1: Define your objective function (minimize this) +def my_equation(x): + # Example: Rosenbrock function + sum_value = 0 + for i in range(len(x) - 1): + sum_value += 100 * (x[i + 1] - x[i]**2)**2 + (x[i] - 1)**2 + return sum_value + +# Step 2: Set optimization parameters +lb = [-5] * 10 # v4.0+ supports list bounds +ub = [5] * 10 +dim = 10 +population = 40 +iterations = 200 + +# Step 3: Run the optimizer +result = PSO.PSO(my_equation, lb, ub, dim, population, iterations) + +# Step 4: Get and use the results +best_solution = result.bestIndividual +best_fitness = result.best_score # v4.0+ provides best_score directly +print(f"Best solution: {best_solution}") +print(f"Best fitness: {best_fitness}") ``` -NumOfRuns=10 -``` -Select general parameters for all optimizers (population size, number of iterations). For example: -``` -params = {'PopulationSize' : 30, 'Iterations' : 50} -``` -Choose whether to Export the results in different formats. 
For example: -``` -export_flags = {'Export_avg':True, 'Export_details':True, 'Export_convergence':True, 'Export_boxplot':True} + +### Complex Functions with Additional Data + +```python +import numpy as np +from EvoloPy.optimizers import PSO + +# For functions that need additional data, use a class-based approach +class MyOptimizationProblem: + def __init__(self, data, weights): + self.data = data + self.weights = weights + + def objective_function(self, x): + # Example: Weighted sum of squared error + error = np.sum(self.weights * (self.data - x)**2) + return error + +# Create your optimization problem with data +my_data = np.random.rand(10) +my_weights = np.random.rand(10) +problem = MyOptimizationProblem(my_data, my_weights) + +# Run optimization +result = PSO.PSO( + problem.objective_function, + lb=[-10] * 10, # v4.0+ supports list bounds + ub=[10] * 10, + dim=10, + PopSize=30, + iters=100 +) + +print(f"Best solution: {result.bestIndividual}") +print(f"Best fitness: {result.best_score}") # v4.0+ exposes best_score directly ``` -Now your experiment is ready to run. Enjoy! +## πŸ“ Results Organization (v4.0+) -Run the example file: +Version 4.0 introduces a more organized output structure: + +``` +results_[timestamp]/ +β”œβ”€β”€ [optimizer_name]/ +β”‚ └── [function_name]/ +β”‚ β”œβ”€β”€ config.txt # Configuration summary +β”‚ β”œβ”€β”€ avg_results.csv # Average results across runs +β”‚ β”œβ”€β”€ detailed_results.csv # Detailed results for each run +β”‚ β”œβ”€β”€ [optimizer]_[function]_boxplot.png # Boxplot visualization +β”‚ └── [optimizer]_[function]_convergence.png # Convergence plot +└── multiple_optimizers/ + └── ... (similar structure for multiple optimizers) ``` -python examples/example.py + +## πŸš„ Parallel Processing (v3.0+) + +Significantly speed up optimization by using multiple CPU cores or GPUs. 
+ +```python +from EvoloPy.api import run_optimizer + +# Enable parallel processing +result = run_optimizer( + optimizer="PSO", + objective_func="F1", + dim=30, + lb=-100, + ub=100, + population_size=50, + iterations=100, + num_runs=10, + enable_parallel=True, # Enable parallel processing + parallel_backend="auto", # "auto", "multiprocessing", or "cuda" + num_processes=None # None = auto-detect optimal count +) ``` +## πŸ‘¨β€πŸ’» Leadership & Credits -## Contribute -- **Issue Tracker**: https://github.com/7ossam81/EvoloPy/issues -- **Source Code**: https://github.com/7ossam81/EvoloPy +### EvoloPy v4.0 Refinements -## Useful Links -- **Video Demo**:https://www.youtube.com/watch?v=8t10SyrhDjQ -- **Paper source**: https://github.com/7ossam81/EvoloPy -- **Paper**: https://www.scitepress.org/Papers/2016/60482/60482.pdf -- **Poster source**: https://github.com/7ossam81/EvoloPy-poster -- **Live Demo**: http://evo-ml.com/evolopy-live-demo/ +Version 4.0 introduced several improvements: +- Consistent bounds handling (lists instead of scalars) +- Organized output folders with better structure +- Direct access to best fitness scores +- Enhanced visualization and statistics +- Improved handling of multiple runs -## List of contributors -- 7ossam81 -- RaneemQaddoura -- aljarrahcs -- jbae11 -- dietmarwo -- bionboy -- deepak-158 -- JJ +For details on v4.0 improvements, see [changes_v4.md](changes_v4.md). -## Reference +### Parallel Processing System (v3.0) -For more information about EvoloPy, please refer to our paper: +The parallel processing system in v3.0 was implemented by **Jaber Jaber** ([@jaberjaber23](https://github.com/jaberjaber23)), providing: -Faris, Hossam, Ibrahim Aljarah, Seyedali Mirjalili, Pedro A. Castillo, and Juan JuliΓ‘n Merelo GuervΓ³s. "EvoloPy: An Open-source Nature-inspired Optimization Framework in Python." In IJCCI (ECTA), pp. 171-177. 2016. 
-https://www.scitepress.org/Papers/2016/60482/60482.pdf +- **Dramatic Performance Improvements**: Up to 20x speedup on large tasks +- **Multi-platform Support**: Utilizes both CPU cores and CUDA GPUs +- **Smart Hardware Detection**: Automatically configures optimal settings +- **Improved Scalability**: Handles much larger optimization problems +- **Excellent Developer Experience**: Simple API with automatic backend selection -Please include the following related citations: +For details on Jaber's parallel processing implementation, see [changes_v3.md](changes_v3.md). -- Qaddoura, Raneem, Hossam Faris, Ibrahim Aljarah, and Pedro A. Castillo. "EvoCluster: An Open-Source Nature-Inspired Optimization Clustering Framework in Python." In International Conference on the Applications of Evolutionary Computation (Part of EvoStar), pp. 20-36. Springer, Cham, 2020. -- Ruba Abu Khurma, Ibrahim Aljarah, Ahmad Sharieh, and Seyedali Mirjalili. Evolopy-fs: An open-source nature-inspired optimization framework in python for feature selection. In Evolutionary Machine Learning Techniques, pages 131–173. Springer, 2020 +### Original EvoloPy Concept +Original library concept and initial algorithms by Faris, Aljarah, Mirjalili, Castillo, and GuervΓ³s. +## πŸ“„ Citation -## Support +If you use EvoloPy in your research, please cite: + +```bibtex +@inproceedings{faris2016evolopy, + title={EvoloPy: An Open-source Nature-inspired Optimization Framework in Python}, + author={Faris, Hossam and Aljarah, Ibrahim and Mirjalili, Seyedali and Castillo, Pedro A and GuervΓ³s, Juan JuliΓ‘n Merelo}, + booktitle={IJCCI (ECTA)}, + pages={171--177}, + year={2016} +} +``` -Use the [issue tracker](https://github.com/7ossam81/EvoloPy/issues) to report bugs or request features. 
+## πŸ“š Benchmark Functions + +The following table provides details about the benchmark functions available in EvoloPy: + +| Function ID | Name | Formula | Range | Dimension | Properties | +|-------------|----------------------|---------------------------------------------|--------------------|-----------|------------------------------------| +| F1 | Sphere | $f(x) = \sum_{i=1}^{n} x_i^2$ | [-100, 100] | 30 | Unimodal, Separable | +| F2 | Sum of Abs & Product | $f(x) = \sum\|x_i\| + \prod\|x_i\|$ | [-10, 10] | 30 | Unimodal, Non-separable | +| F3 | Sum of Squares | $f(x) = \sum_{i=1}^{n} (\sum_{j=1}^{i} x_j)^2$ | [-100, 100] | 30 | Unimodal, Non-separable | +| F4 | Maximum | $f(x) = \max_i\{\|x_i\|\}$ | [-100, 100] | 30 | Unimodal, Non-separable | +| F5 | Rosenbrock | $f(x) = \sum_{i=1}^{n-1} [100(x_{i+1} - x_i^2)^2 + (x_i - 1)^2]$ | [-30, 30] | 30 | Multimodal, Non-separable | +| F6 | Shifted Absolute | $f(x) = \sum_{i=1}^{n} \|x_i + 0.5\|^2$ | [-100, 100] | 30 | Unimodal, Separable | +| F7 | Sum of Powers | $f(x) = \sum_{i=1}^{n} i \cdot x_i^4 + \text{random}[0,1)$ | [-1.28, 1.28] | 30 | Unimodal, Non-separable, Noisy | +| F8 | Sine Problem | $f(x) = \sum_{i=1}^{n} -x_i \sin(\sqrt{\|x_i\|})$ | [-500, 500] | 30 | Multimodal, Separable | +| F9 | Rastrigin | $f(x) = \sum_{i=1}^{n} [x_i^2 - 10\cos(2\pi x_i) + 10]$ | [-5.12, 5.12] | 30 | Multimodal, Separable | +| F10 | Ackley | $f(x) = -20e^{-0.2\sqrt{\frac{1}{n}\sum_{i=1}^{n}x_i^2}} - e^{\frac{1}{n}\sum_{i=1}^{n}\cos(2\pi x_i)} + 20 + e$ | [-32, 32] | 30 | Multimodal, Non-separable | +| F11 | Griewank | $f(x) = \frac{1}{4000}\sum_{i=1}^{n}x_i^2 - \prod_{i=1}^{n}\cos(\frac{x_i}{\sqrt{i}}) + 1$ | [-600, 600] | 30 | Multimodal, Non-separable | +| F12 | Penalized 1 | Complex formula with penalties | [-50, 50] | 30 | Multimodal, Non-separable | +| F13 | Penalized 2 | Complex formula with penalties | [-50, 50] | 30 | Multimodal, Non-separable | +| F14 | Shekel's Foxholes | Complex formula | [-65.536, 65.536] | 2 | 
Multimodal with many local minima | +| F15 | Kowalik | Complex formula | [-5, 5] | 4 | Multimodal, Non-separable | +| F16 | Six-Hump Camel | Complex formula | [-5, 5] | 2 | Multimodal, Non-separable | +| F17 | Branin | Complex formula | [-5, 15] | 2 | Multimodal, Non-separable | +| F18 | Goldstein-Price | Complex formula | [-2, 2] | 2 | Multimodal, Non-separable | +| F19 | Hartman 3 | Complex formula | [0, 1] | 3 | Multimodal, Non-separable | +| F20 | Hartman 6 | Complex formula | [0, 1] | 6 | Multimodal, Non-separable | +| F21 | Shekel 5 | Complex formula | [0, 10] | 4 | Multimodal, Non-separable | +| F22 | Shekel 7 | Complex formula | [0, 10] | 4 | Multimodal, Non-separable | +| F23 | Shekel 10 | Complex formula | [0, 10] | 4 | Multimodal, Non-separable | +| ackley | Ackley (Standalone) | Same as F10 | [-32.768, 32.768] | 30 | Multimodal, Non-separable | +| rosenbrock | Rosenbrock (Standalone) | Same as F5 | [-5, 10] | 30 | Multimodal, Non-separable | +| rastrigin | Rastrigin (Standalone) | Same as F9 | [-5.12, 5.12] | 30 | Multimodal, Separable | +| griewank | Griewank (Standalone) | Same as F11 | [-600, 600] | 30 | Multimodal, Non-separable | + +## 📜 License + +EvoloPy is licensed under the MIT License. diff --git a/README.md.bak b/README.md.bak deleted file mode 100644 index 10063e0..0000000 --- a/README.md.bak +++ /dev/null @@ -1,121 +0,0 @@ -
-EvoloPy-logo -
- -# EvoloPy: An open source nature-inspired optimization toolbox for global optimization in Python - -The EvoloPy toolbox provides classical and recent nature-inspired metaheuristic for the global optimization. The list of optimizers that have been implemented includes Particle Swarm Optimization (PSO), Multi-Verse Optimizer (MVO), Grey Wolf Optimizer (GWO), and Moth Flame Optimization (MFO). The full list of implemented optimizers is available here https://github.com/7ossam81/EvoloPy/wiki/List-of-optimizers - -If you like our framework then we would really appreciate **a Star ⭐!** - - -## Features -- Fourteen nature-inspired metaheuristic optimizers were implemented. -- The implementation uses the fast array manipulation using `NumPy`. -- Matrix support using `SciPy`'s package. -- More optimizers is coming soon. - -## New Benchmark Functions -We have added the following benchmark functions: - -1. **Ackley Function** - Tests convergence behavior. -2. **Rosenbrock Function** - Evaluates valley-following performance. -3. **Rastrigin Function** - Measures global and local search capabilities. -4. **Griewank Function** - Analyzes algorithm robustness. - -These functions are now available for evaluating optimization algorithms in EvoloPy. - - -## Installation -- Python 3.6 or higher is required. - -Run - - pip install -r requirements.txt - -(possibly with `sudo`) - -This command will install `sklearn`, `NumPy`, `SciPy`, and other dependencies for you. - -- **For Windows**: Please install Anaconda from [here](https://www.continuum.io/downloads), which is the leading open data science platform powered by Python. 
-- **For Ubuntu or Debian (Python 3)**: - - sudo apt-get install python3-numpy python3-scipy liblapack-dev libatlas-base-dev libgsl0-dev fftw-dev libglpk-dev libdsdp-dev - -## Get the source - -Clone the Git repository from GitHub - - git clone https://github.com/7ossam81/EvoloPy.git - - -## Quick User Guide - -EvoloPy toolbox contains twenty three benchmarks (F1-F24). The main file is the optimizer.py, which considered the interface of the toolbox. In the optimizer.py you can setup your experiment by selecting the optimizers, the benchmarks, number of runs, number of iterations, and population size. -The following is a sample example to use the EvoloPy toolbox. -Select optimizers from the list of available ones: "SSA","PSO","GA","BAT","FFA","GWO","WOA","MVO","MFO","CS","HHO","SCA","JAYA","DE". For example: -``` -optimizer=["SSA","PSO","GA"] -``` - -After that, Select benchmark function from the list of available ones: "F1","F2","F3","F4","F5","F6","F7","F8","F9","F10","F11","F12","F13","F14","F15","F16","F17","F18","F19". For example: -``` -objectivefunc=["F3","F4"] -``` - -Select the number of repetitions for each experiment. To obtain meaningful statistical results, usually 30 independent runs are executed for each algorithm. For example: -``` -NumOfRuns=10 -``` -Select general parameters for all optimizers (population size, number of iterations). For example: -``` -params = {'PopulationSize' : 30, 'Iterations' : 50} -``` -Choose whether to Export the results in different formats. For example: -``` -export_flags = {'Export_avg':True, 'Export_details':True, 'Export_convergence':True, 'Export_boxplot':True} -``` - -Now your experiment is ready to run. Enjoy! 
- -## Contribute -- **Issue Tracker**: https://github.com/7ossam81/EvoloPy/issues -- **Source Code**: https://github.com/7ossam81/EvoloPy - -## Useful Links -- **Video Demo**:https://www.youtube.com/watch?v=8t10SyrhDjQ -- **Paper source**: https://github.com/7ossam81/EvoloPy -- **Paper**: https://www.scitepress.org/Papers/2016/60482/60482.pdf -- **Poster source**: https://github.com/7ossam81/EvoloPy-poster -- **Live Demo**: http://evo-ml.com/evolopy-live-demo/ - -## List of contributors -- 7ossam81 -- RaneemQaddoura -- aljarrahcs -- jbae11 -- dietmarwo -- bionboy -- deepak-158 -- JJ - - -## Reference - -For more information about EvoloPy, please refer to our paper: - -Faris, Hossam, Ibrahim Aljarah, Seyedali Mirjalili, Pedro A. Castillo, and Juan JuliΓ‘n Merelo GuervΓ³s. "EvoloPy: An Open-source Nature-inspired Optimization Framework in Python." In IJCCI (ECTA), pp. 171-177. 2016. -https://www.scitepress.org/Papers/2016/60482/60482.pdf - -Please include the following related citations: - -- Qaddoura, Raneem, Hossam Faris, Ibrahim Aljarah, and Pedro A. Castillo. "EvoCluster: An Open-Source Nature-Inspired Optimization Clustering Framework in Python." In International Conference on the Applications of Evolutionary Computation (Part of EvoStar), pp. 20-36. Springer, Cham, 2020. -- Ruba Abu Khurma, Ibrahim Aljarah, Ahmad Sharieh, and Seyedali Mirjalili. Evolopy-fs: An open-source nature-inspired optimization framework in python for feature selection. In Evolutionary Machine Learning Techniques, pages 131–173. Springer, 2020 - - - -## Support - -Use the [issue tracker](https://github.com/7ossam81/EvoloPy/issues) to report bugs or request features. 
- - diff --git a/examples/evolopy_notebook_example.ipynb b/examples/evolopy_notebook_example.ipynb new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/examples/evolopy_notebook_example.ipynb @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/examples/parallel_processing_example.py b/examples/parallel_processing_example.py new file mode 100644 index 0000000..8ef23df --- /dev/null +++ b/examples/parallel_processing_example.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python +""" +Example script demonstrating EvoloPy's parallel processing feature. + +This script compares execution times of sequential and parallel processing +for running multiple optimization algorithms on benchmark functions. +""" + +import time +import numpy as np +import matplotlib.pyplot as plt +from EvoloPy.api import run_optimizer, run_multiple_optimizers, get_hardware_info + +def run_benchmark(enable_parallel=False, backend="auto"): + """Run a benchmark comparing different optimizers with and without parallel processing.""" + + # Define parameters + optimizers = ["PSO", "GWO", "MVO", "WOA"] + functions = ["F1", "F5", "F10"] + dim = 20 + population_size = 50 + iterations = 100 + num_runs = 10 + + # Print configuration + print("\n" + "="*50) + print(f"Running benchmark with parallel={enable_parallel}, backend={backend if enable_parallel else 'N/A'}") + print(f"Optimizers: {optimizers}") + print(f"Functions: {functions}") + print(f"Dimension: {dim}, Population size: {population_size}") + print(f"Iterations: {iterations}, Number of runs: {num_runs}") + print("="*50) + + # Store execution times + execution_times = {} + + # Run each optimizer on each function + for optimizer in optimizers: + execution_times[optimizer] = {} + + for function in functions: + print(f"\nRunning {optimizer} on {function}...") + + # Measure execution time + start_time = time.time() + + # Run the optimizer + result = run_optimizer( + optimizer=optimizer, + objective_func=function, + dim=dim, + 
population_size=population_size, + iterations=iterations, + num_runs=num_runs, + enable_parallel=enable_parallel, + parallel_backend=backend + ) + + # Calculate total execution time + total_time = time.time() - start_time + + # Store times + execution_times[optimizer][function] = { + 'optimizer_time': result['execution_time'], + 'total_time': total_time, + 'best_fitness': result['best_fitness'] + } + + # Print results + print(f" Best fitness: {result['best_fitness']}") + print(f" Optimizer execution time: {result['execution_time']:.2f} seconds") + print(f" Total wall time: {total_time:.2f} seconds") + + return execution_times + +def plot_results(sequential_times, parallel_times): + """Plot comparison of sequential vs parallel execution times.""" + + optimizers = list(sequential_times.keys()) + functions = list(sequential_times[optimizers[0]].keys()) + + # Create figure + fig, axes = plt.subplots(len(functions), 1, figsize=(12, 5*len(functions))) + if len(functions) == 1: + axes = [axes] + + for i, function in enumerate(functions): + ax = axes[i] + + # Prepare data + seq_times = [sequential_times[opt][function]['total_time'] for opt in optimizers] + par_times = [parallel_times[opt][function]['total_time'] for opt in optimizers] + speedups = [seq_times[j]/par_times[j] for j in range(len(optimizers))] + + # Set width of bars + bar_width = 0.35 + + # Set position of bars + r1 = np.arange(len(optimizers)) + r2 = [x + bar_width for x in r1] + + # Create bars + ax.bar(r1, seq_times, width=bar_width, label='Sequential', color='blue') + ax.bar(r2, par_times, width=bar_width, label='Parallel', color='green') + + # Add speedup text above bars + for j, speedup in enumerate(speedups): + ax.text(r2[j], par_times[j] + 0.1, f'{speedup:.1f}x', + ha='center', va='bottom', fontweight='bold') + + # Add labels and title + ax.set_xlabel('Optimizer') + ax.set_ylabel('Execution Time (seconds)') + ax.set_title(f'Execution Time Comparison for {function}') + ax.set_xticks([r + bar_width/2 
for r in range(len(optimizers))]) + ax.set_xticklabels(optimizers) + ax.legend() + + # Set y-axis limit to make room for text + ax.set_ylim(0, max(max(seq_times), max(par_times)) * 1.2) + + plt.tight_layout() + plt.savefig('parallel_benchmark_results.png') + plt.show() + +if __name__ == "__main__": + # Display hardware information + try: + hw_info = get_hardware_info() + print("Hardware Information:") + print(f"CPU cores: {hw_info['cpu_count']}") + print(f"CPU threads: {hw_info['cpu_threads']}") + print(f"RAM: {hw_info['ram_gb']:.2f} GB") + + if hw_info['gpu_available']: + print(f"CUDA GPUs available: {hw_info['gpu_count']}") + for i, (name, mem) in enumerate(zip(hw_info['gpu_names'], hw_info['gpu_memory'])): + print(f" GPU {i}: {name} ({mem:.2f} GB)") + + # Use CUDA backend if GPU is available + parallel_backend = "cuda" + else: + print("CUDA GPUs: None detected") + # Fall back to multiprocessing if no GPU + parallel_backend = "multiprocessing" + except: + print("Hardware detection not available. 
Using multiprocessing backend.") + parallel_backend = "multiprocessing" + + # Run sequential benchmark + sequential_times = run_benchmark(enable_parallel=False) + + # Run parallel benchmark + parallel_times = run_benchmark(enable_parallel=True, backend=parallel_backend) + + # Plot results + plot_results(sequential_times, parallel_times) \ No newline at end of file diff --git a/examples/quickstart.ipynb b/examples/quickstart.ipynb new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/examples/quickstart.ipynb @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..0bfbc90 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,60 @@ +[build-system] +requires = ["setuptools>=42", "wheel", "build>=0.7.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "EvoloPy" +version = "4.0.6" +description = "An open source nature-inspired optimization toolbox with parallel processing capabilities" +readme = "README.md" +authors = [ + {name = "Hossam Faris", email = "hossam.faris@ju.edu.jo"}, +] +maintainers = [ + {name = "Raneem Qaddoura", email = "raneem.qaddoura@gmail.com"}, + {name = "Jaber Jaber", email = "jaber2jabet@gmail.com"} +] +license = {text = "MIT"} +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Science/Research", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] +keywords = ["optimization", "meta-heuristic", "evolutionary", "swarm intelligence", "parallel", "gpu-accelerated"] +dependencies = [ + "numpy>=1.19.0", + "pandas>=1.0.0", + "scipy>=1.5.0", + "matplotlib>=3.3.0", + "scikit-learn>=0.23.0", + "psutil>=5.8.0", +] +requires-python = ">=3.6" + 
+[project.optional-dependencies] +gpu = ["torch>=1.7.0"] + +[project.urls] +Homepage = "https://github.com/7ossam81/EvoloPy" +"Bug Tracker" = "https://github.com/7ossam81/EvoloPy/issues" +"Source Code" = "https://github.com/7ossam81/EvoloPy" +"Contributors" = "https://github.com/7ossam81/EvoloPy/graphs/contributors" + +[project.scripts] +evolopy = "EvoloPy.cli:run_cli" + +[tool.setuptools] +packages = ["EvoloPy", "EvoloPy.optimizers"] + +[tool.contributors] +"Jaber Jaber" = {github = "jaberjaber23", contribution = "Lead developer of parallel processing implementation for v3.0.0, including multi-core CPU and GPU acceleration"} + +[tool.wheel] +name = "EvoloPy" \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index c3d9bbf..44590e7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,7 @@ -numpy -pandas -scipy -matplotlib -scikit-learn \ No newline at end of file +numpy>=1.19.0 +pandas>=1.0.0 +scipy>=1.5.0 +matplotlib>=3.3.0 +scikit-learn>=0.23.0 +psutil>=5.8.0 +torch>=1.7.0; sys_platform != "darwin" or platform_machine != "arm64" \ No newline at end of file diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..65c95d6 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,36 @@ +[metadata] +name = EvoloPy +version = 3.0.0 +author = EvoloPy Team +author_email = raneem.qaddoura@gmail.com +description = Implementation of meta-heuristic optimizers +long_description = file: README.md +long_description_content_type = text/markdown +url = https://github.com/7ossam81/EvoloPy +project_urls = + Bug Tracker = https://github.com/7ossam81/EvoloPy/issues +classifiers = + Programming Language :: Python :: 3 + License :: OSI Approved :: MIT License + Operating System :: OS Independent + +[options] +package_dir = + = . +packages = find: +python_requires = >=3.6 + +[options.packages.find] +where = . 
+ +[options.entry_points] +console_scripts = + evolopy = EvoloPy.cli:run_cli + +[contributors] +Jaber Jaber (GitHub = @jaberjaber23) = Parallel processing implementation for v3.0.0 + +[egg_info] +tag_build = +tag_date = 0 + diff --git a/setup.py b/setup.py index 4fbf5ec..5a2b6e1 100644 --- a/setup.py +++ b/setup.py @@ -1,25 +1,57 @@ from setuptools import setup, find_packages +# Read the contents of README.md file +with open("README.md", "r", encoding="utf-8") as fh: + long_description = fh.read() + setup( - name='EvoloPy', # Replace with your package name - version='0.1.0', # Set the version of your package - description='A framework for metaheuristic optimization algorithms', - author='Evo-ML', # Replace with your name or organization - author_email='raneem.qaddoura@gmail.com', # Replace with your email - url='https://github.com/7ossam81/EvoloPy', # Replace with your GitHub repository URL - packages=find_packages(where='EvoloPy'), # Automatically find all the packages inside the EvoloPy directory - include_package_data=True, # To include any additional files specified in MANIFEST.in - install_requires=[ # List of dependencies - 'numpy', - 'pandas', - 'scipy', - 'matplotlib', - 'scikit-learn' + name='EvoloPy', + version='4.0.6', + description='An open source nature-inspired optimization toolbox with parallel processing', + long_description=long_description, + long_description_content_type="text/markdown", + author='EvoloPy Team', + author_email='raneem.qaddoura@gmail.com', + maintainer='Jaber Jaber', + maintainer_email='jaber2jabet@gmail.com', # Replace with your actual email if desired + url='https://github.com/7ossam81/EvoloPy', + # Explicitly specify package directories to ensure proper capitalization + package_dir={'EvoloPy': 'EvoloPy'}, + packages=['EvoloPy', 'EvoloPy.optimizers'], + include_package_data=True, + install_requires=[ + 'numpy>=1.19.0', + 'pandas>=1.0.0', + 'scipy>=1.5.0', + 'matplotlib>=3.3.0', + 
'scikit-learn>=0.23.0', + 'psutil>=5.8.0', ], - classifiers=[ # Optional classifiers for categorizing your project + classifiers=[ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Science/Research', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3', - 'License :: OSI Approved :: Apache-2.0 license', # Adjust if you are using a different license - 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], - python_requires='>=3.6', # Adjust this based on the Python version you support + python_requires='>=3.6', + keywords=['optimization', 'meta-heuristic', 'evolutionary', 'parallel-processing'], + project_urls={ + 'Bug Reports': 'https://github.com/7ossam81/EvoloPy/issues', + 'Source': 'https://github.com/7ossam81/EvoloPy', + 'Contributors': 'https://github.com/7ossam81/EvoloPy/graphs/contributors', + }, + entry_points={ + 'console_scripts': [ + 'evolopy-run=EvoloPy.cli:run_cli', + ], + }, + extras_require={ + 'gpu': ['torch>=1.7.0'], + }, ) \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..1d3fb9e --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,3 @@ +""" +Unit tests for the EvoloPy package. 
+""" \ No newline at end of file diff --git a/tests/test_optimizers.py b/tests/test_optimizers.py new file mode 100644 index 0000000..b15565b --- /dev/null +++ b/tests/test_optimizers.py @@ -0,0 +1,74 @@ +import unittest +import numpy as np +from EvoloPy.optimizers import PSO, GWO, MVO +from EvoloPy.benchmarks import F1, F2, F3 + +class TestOptimizers(unittest.TestCase): + + def setUp(self): + """Set up test parameters common for all optimizers""" + self.lb = -10 + self.ub = 10 + self.dim = 5 + self.population_size = 10 + self.iterations = 20 + + def test_pso_optimizer(self): + """Test that PSO optimizer returns a valid solution""" + result = PSO.PSO(F1, self.lb, self.ub, self.dim, self.population_size, self.iterations) + + # Check if result object has all required properties + self.assertIsNotNone(result.bestIndividual) + self.assertEqual(len(result.bestIndividual), self.dim) + self.assertIsNotNone(result.convergence) + self.assertEqual(len(result.convergence), self.iterations) + self.assertEqual(result.optimizer, "PSO") + self.assertEqual(result.objfname, "F1") + + # Check if the solution is within bounds + self.assertTrue(np.all(result.bestIndividual >= self.lb)) + self.assertTrue(np.all(result.bestIndividual <= self.ub)) + + def test_gwo_optimizer(self): + """Test that GWO optimizer returns a valid solution""" + result = GWO.GWO(F1, self.lb, self.ub, self.dim, self.population_size, self.iterations) + + # Check if result object has all required properties + self.assertIsNotNone(result.bestIndividual) + self.assertEqual(len(result.bestIndividual), self.dim) + self.assertIsNotNone(result.convergence) + self.assertEqual(len(result.convergence), self.iterations) + self.assertEqual(result.optimizer, "GWO") + self.assertEqual(result.objfname, "F1") + + # Check if the solution is within bounds + self.assertTrue(np.all(result.bestIndividual >= self.lb)) + self.assertTrue(np.all(result.bestIndividual <= self.ub)) + + def test_mvo_optimizer(self): + """Test that MVO 
optimizer returns a valid solution""" + result = MVO.MVO(F1, self.lb, self.ub, self.dim, self.population_size, self.iterations) + + # Check if result object has all required properties + self.assertIsNotNone(result.bestIndividual) + self.assertEqual(len(result.bestIndividual), self.dim) + self.assertIsNotNone(result.convergence) + self.assertEqual(len(result.convergence), self.iterations) + self.assertEqual(result.optimizer, "MVO") + self.assertEqual(result.objfname, "F1") + + # Check if the solution is within bounds + self.assertTrue(np.all(result.bestIndividual >= self.lb)) + self.assertTrue(np.all(result.bestIndividual <= self.ub)) + + def test_multiple_benchmarks(self): + """Test that optimizers work with different benchmark functions""" + benchmarks = [F1, F2, F3] + + for benchmark in benchmarks: + result = PSO.PSO(benchmark, self.lb, self.ub, self.dim, self.population_size, self.iterations) + self.assertEqual(result.objfname, benchmark.__name__) + self.assertEqual(len(result.bestIndividual), self.dim) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tests/test_optimizers/test_WOA.py b/tests/test_optimizers/test_WOA.py index c7707ee..05cbb38 100644 --- a/tests/test_optimizers/test_WOA.py +++ b/tests/test_optimizers/test_WOA.py @@ -2,7 +2,7 @@ import os # Get the absolute path to the EvoloPy directory -base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) +base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # Add the EvoloPy directory to the Python path sys.path.append(base_dir)