diff --git a/.github/workflows/package-build.yaml b/.github/workflows/package-build.yaml new file mode 100644 index 00000000..97d0fb91 --- /dev/null +++ b/.github/workflows/package-build.yaml @@ -0,0 +1,134 @@ +name: Build and Release + +on: + push: + tags: + - "v*" + +jobs: + check-branch: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 # full history so branch membership can be verified + - name: Verify tag is on main + run: | + if ! git branch -r --contains "$GITHUB_SHA" | grep -q "origin/main$"; then + echo "Tag $GITHUB_REF is not on main branch" + exit 1 + fi + build: + needs: check-branch + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + include: + - os: ubuntu-latest + artifact_name: linux-x64 + - os: windows-latest + artifact_name: windows-x64.exe + - os: macos-latest + artifact_name: macos-x64 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.10" + architecture: ${{ runner.os == 'Windows' && 'x64' || '' }} + + - name: Install tools + run: | + python -m pip install --upgrade pip + pip install pyinstaller + + - name: Install package + run: | + pip install . + + - name: Create Linux executable + if: matrix.os == 'ubuntu-latest' + run: | + pyinstaller --onefile --name ecooptimizer-server $(which eco-ext) + mv dist/ecooptimizer-server dist/ecooptimizer-server-${{ matrix.artifact_name }} + + pyinstaller --onefile --name ecooptimizer-server-dev $(which eco-ext-dev) + mv dist/ecooptimizer-server-dev dist/ecooptimizer-server-dev-${{ matrix.artifact_name }} + + - name: Create Windows executable + if: matrix.os == 'windows-latest' + shell: pwsh + run: | + $entryProd = python -c "from importlib.metadata import entry_points; print([ep.value for ep in entry_points()['console_scripts'] if ep.name == 'eco-ext'][0])" + $pyPathProd = $entryProd.Split(':')[0].Replace('.', '\') + '.py' + + $entryDev = python -c "from importlib.metadata import entry_points; print([ep.value for ep in entry_points()['console_scripts'] if ep.name == 'eco-ext-dev'][0])" + $pyPathDev = $entryDev.Split(':')[0].Replace('.', '\') + '.py' + + pyinstaller --onefile --name ecooptimizer-server "src/$pyPathProd" + Move-Item dist\ecooptimizer-server.exe "dist\ecooptimizer-server-${{ matrix.artifact_name }}" + + pyinstaller --onefile --name ecooptimizer-server-dev "src/$pyPathDev" + Move-Item dist\ecooptimizer-server-dev.exe "dist\ecooptimizer-server-dev-${{ matrix.artifact_name }}" + + - name: Create macOS executable + if: matrix.os == 'macos-latest' + run: | + pyinstaller --onefile --name ecooptimizer-server $(which eco-ext) + mv dist/ecooptimizer-server dist/ecooptimizer-server-${{ matrix.artifact_name }} + + pyinstaller --onefile --name ecooptimizer-server-dev $(which eco-ext-dev) + mv dist/ecooptimizer-server-dev dist/ecooptimizer-server-dev-${{ matrix.artifact_name }} + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: artifacts-${{ matrix.os }} + path: | + dist/ecooptimizer-server-* + dist/ecooptimizer-server-dev-* + if-no-files-found: error + + create-release: + needs: build + runs-on: ubuntu-latest + steps: + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + path: artifacts + pattern: artifacts-* + merge-multiple: false # Keep separate folders per OS + + - name: Create release + uses: softprops/action-gh-release@v1 + with: + tag_name: ${{ github.ref }} + name: ${{ github.ref_name }} + body: | + ${{ github.event.head_commit.message }} + + ## EcoOptimizer Server Executables + This 
release contains the standalone server executables for launching the EcoOptimizer analysis engine. + These are designed to work with the corresponding **EcoOptimizer VS Code Extension**. + + ### Included Artifacts + - **Production Server**: `ecooptimizer-server-` + (Stable version for production use) + - **Development Server**: `ecooptimizer-server-dev-` + (Development version with debug features) + + ### Platform Support + - Linux (`linux-x64`) + - Windows (`windows-x64.exe`) + - macOS (`macos-x64`) + files: | + artifacts/artifacts-ubuntu-latest/* + artifacts/artifacts-windows-latest/* + artifacts/artifacts-macos-latest/* + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/src/ecooptimizer/__main__.py b/src/ecooptimizer/__main__.py index bbe683c2..90ed8259 100644 --- a/src/ecooptimizer/__main__.py +++ b/src/ecooptimizer/__main__.py @@ -6,24 +6,24 @@ import libcst as cst -from .utils.output_manager import LoggingManager -from .utils.output_manager import save_file, save_json_files, copy_file_to_output +from ecooptimizer.utils.output_manager import LoggingManager +from ecooptimizer.utils.output_manager import save_file, save_json_files, copy_file_to_output -from .api.routes.refactor_smell import ChangedFile, RefactoredData +from ecooptimizer.api.routes.refactor_smell import ChangedFile, RefactoredData -from .measurements.codecarbon_energy_meter import CodeCarbonEnergyMeter +from ecooptimizer.measurements.codecarbon_energy_meter import CodeCarbonEnergyMeter -from .analyzers.analyzer_controller import AnalyzerController +from ecooptimizer.analyzers.analyzer_controller import AnalyzerController -from .refactorers.refactorer_controller import RefactorerController +from ecooptimizer.refactorers.refactorer_controller import RefactorerController -from . import ( +from ecooptimizer import ( SAMPLE_PROJ_DIR, SOURCE, ) -from .config import CONFIG +from ecooptimizer.config import CONFIG loggingManager = LoggingManager() @@ -53,9 +53,15 @@ def main(): logging.error("Could not retrieve initial emissions. 
Exiting.") exit(1) + enabled_smells = { + "cached-repeated-calls": {"threshold": 2}, + "no-self-use": {}, + "use-a-generator": {}, + "too-many-arguments": {"max_args": 5}, + } + analyzer_controller = AnalyzerController() - # update_smell_registry(["no-self-use"]) - smells_data = analyzer_controller.run_analysis(SOURCE) + smells_data = analyzer_controller.run_analysis(SOURCE, enabled_smells) save_json_files("code_smells.json", [smell.model_dump() for smell in smells_data]) copy_file_to_output(SOURCE, "refactored-test-case.py") diff --git a/src/ecooptimizer/analyzers/analyzer_controller.py b/src/ecooptimizer/analyzers/analyzer_controller.py index 65835b0c..556ff2ca 100644 --- a/src/ecooptimizer/analyzers/analyzer_controller.py +++ b/src/ecooptimizer/analyzers/analyzer_controller.py @@ -1,77 +1,86 @@ +"""Controller class for coordinating multiple code analysis tools.""" + # pyright: reportOptionalMemberAccess=false from pathlib import Path +import traceback from typing import Callable, Any -from ..data_types.smell_record import SmellRecord - -from ..config import CONFIG - -from ..data_types.smell import Smell +from ecooptimizer.data_types.smell_record import SmellRecord +from ecooptimizer.config import CONFIG +from ecooptimizer.data_types.smell import Smell +from ecooptimizer.analyzers.pylint_analyzer import PylintAnalyzer +from ecooptimizer.analyzers.ast_analyzer import ASTAnalyzer +from ecooptimizer.analyzers.astroid_analyzer import AstroidAnalyzer +from ecooptimizer.utils.smells_registry import retrieve_smell_registry -from .pylint_analyzer import PylintAnalyzer -from .ast_analyzer import ASTAnalyzer -from .astroid_analyzer import AstroidAnalyzer - -from ..utils.smells_registry import retrieve_smell_registry +logger = CONFIG["detectLogger"] class AnalyzerController: + """Orchestrates multiple code analysis tools and aggregates their results.""" + def __init__(self): - """Initializes analyzers for different analysis methods.""" + """Initializes analyzers for Pylint, AST, and Astroid analysis methods.""" self.pylint_analyzer = PylintAnalyzer() self.ast_analyzer = ASTAnalyzer() self.astroid_analyzer = AstroidAnalyzer() - def run_analysis(self, file_path: Path, selected_smells: str | list[str] = "ALL"): - """ - Runs multiple analysis tools on the given Python file and logs the results. - Returns a list of detected code smells. - """ + def run_analysis( + self, file_path: Path, enabled_smells: dict[str, dict[str, int | str]] | list[str] + ) -> list[Smell]: + """Runs configured analyzers on a file and returns aggregated results. 
+ + Args: + file_path: Path to the Python file to analyze + enabled_smells: Dictionary or list specifying which smells to detect + Returns: + list[Smell]: All detected code smells + + Raises: + TypeError: If no smells are selected for detection + Exception: Any errors during analysis are logged and re-raised + """ smells_data: list[Smell] = [] - if not selected_smells: - raise TypeError("At least 1 smell must be selected for detection") + if not enabled_smells: + raise TypeError("At least one smell must be selected for detection.") - SMELL_REGISTRY = retrieve_smell_registry(selected_smells) + SMELL_REGISTRY = retrieve_smell_registry(enabled_smells) try: pylint_smells = self.filter_smells_by_method(SMELL_REGISTRY, "pylint") ast_smells = self.filter_smells_by_method(SMELL_REGISTRY, "ast") astroid_smells = self.filter_smells_by_method(SMELL_REGISTRY, "astroid") - CONFIG["detectLogger"].info("🟢 Starting analysis process") - CONFIG["detectLogger"].info(f"📂 Analyzing file: {file_path}") + logger.info("🟢 Starting analysis process") + logger.info(f"📂 Analyzing file: {file_path}") if pylint_smells: - CONFIG["detectLogger"].info(f"🔍 Running Pylint analysis on {file_path}") + logger.info(f"🔍 Running Pylint analysis on {file_path}") pylint_options = self.generate_pylint_options(pylint_smells) pylint_results = self.pylint_analyzer.analyze(file_path, pylint_options) smells_data.extend(pylint_results) - CONFIG["detectLogger"].info( - f"✅ Pylint analysis completed. {len(pylint_results)} smells detected." - ) + logger.info(f"✅ Pylint analysis completed. {len(pylint_results)} smells detected.") if ast_smells: - CONFIG["detectLogger"].info(f"🔍 Running AST analysis on {file_path}") + logger.info(f"🔍 Running AST analysis on {file_path}") ast_options = self.generate_custom_options(ast_smells) - ast_results = self.ast_analyzer.analyze(file_path, ast_options) + ast_results = self.ast_analyzer.analyze(file_path, ast_options) # type: ignore smells_data.extend(ast_results) - CONFIG["detectLogger"].info( - f"✅ AST analysis completed. {len(ast_results)} smells detected." - ) + logger.info(f"✅ AST analysis completed. {len(ast_results)} smells detected.") if astroid_smells: - CONFIG["detectLogger"].info(f"🔍 Running Astroid analysis on {file_path}") + logger.info(f"🔍 Running Astroid analysis on {file_path}") astroid_options = self.generate_custom_options(astroid_smells) - astroid_results = self.astroid_analyzer.analyze(file_path, astroid_options) + astroid_results = self.astroid_analyzer.analyze(file_path, astroid_options) # type: ignore smells_data.extend(astroid_results) - CONFIG["detectLogger"].info( + logger.info( f"✅ Astroid analysis completed. {len(astroid_results)} smells detected." 
) if smells_data: - CONFIG["detectLogger"].info("⚠️ Detected Code Smells:") + logger.info("⚠️ Detected Code Smells:") for smell in smells_data: if smell.occurences: first_occurrence = smell.occurences[0] @@ -84,12 +93,14 @@ def run_analysis(self, file_path: Path, selected_smells: str | list[str] = "ALL" else: line_info = "" - CONFIG["detectLogger"].info(f" • {smell.symbol} {line_info}: {smell.message}") + logger.info(f" • {smell.symbol} {line_info}: {smell.message}") else: - CONFIG["detectLogger"].info("🎉 No code smells detected.") + logger.info("🎉 No code smells detected.") except Exception as e: - CONFIG["detectLogger"].error(f"❌ Error during analysis: {e!s}") + logger.error(f"❌ Error during analysis: {e!s}") + traceback.print_exc() + raise e return smells_data @@ -97,41 +108,54 @@ def run_analysis(self, file_path: Path, selected_smells: str | list[str] = "ALL" def filter_smells_by_method( smell_registry: dict[str, SmellRecord], method: str ) -> dict[str, SmellRecord]: - filtered = { + """Filters smell registry by analysis method. + + Args: + smell_registry: Dictionary of all available smells + method: Analysis method to filter by ('pylint', 'ast', or 'astroid') + + Returns: + dict[str, SmellRecord]: Filtered dictionary of smells for the specified method + """ + return { name: smell for name, smell in smell_registry.items() - if smell["enabled"] and (method == smell["analyzer_method"]) + if smell["enabled"] and smell["analyzer_method"] == method } - return filtered @staticmethod def generate_pylint_options(filtered_smells: dict[str, SmellRecord]) -> list[str]: - pylint_smell_symbols = [] - extra_pylint_options = [ - "--disable=all", - ] + """Generates Pylint command-line options from enabled smells. - for symbol, smell in zip(filtered_smells.keys(), filtered_smells.values()): - pylint_smell_symbols.append(symbol) + Args: + filtered_smells: Dictionary of smells enabled for Pylint analysis + Returns: + list[str]: Pylint command-line arguments + """ + pylint_options = ["--disable=all"] + + for _smell_name, smell in filtered_smells.items(): if len(smell["analyzer_options"]) > 0: for param_data in smell["analyzer_options"].values(): flag = param_data["flag"] value = param_data["value"] if value: - extra_pylint_options.append(f"{flag}={value}") + pylint_options.append(f"{flag}={value}") - extra_pylint_options.append(f"--enable={','.join(pylint_smell_symbols)}") - return extra_pylint_options + pylint_options.append(f"--enable={','.join(filtered_smells.keys())}") + return pylint_options @staticmethod def generate_custom_options( filtered_smells: dict[str, SmellRecord], - ) -> list[tuple[Callable, dict[str, Any]]]: # type: ignore - ast_options = [] - for smell in filtered_smells.values(): - method = smell["checker"] - options = smell["analyzer_options"] - ast_options.append((method, options)) - - return ast_options + ) -> list[tuple[Callable | None, dict[str, Any]]]: # type: ignore + """Generates options for custom AST/Astroid analyzers. 
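As a worked example of the flag mapping above, one registry entry (trimmed to the keys `generate_pylint_options` actually reads; the concrete flag and value are assumptions) translates as follows:

```python
# Trimmed SmellRecord-style entry; only the keys the method reads are shown.
filtered_smells = {
    "too-many-arguments": {
        "enabled": True,
        "analyzer_method": "pylint",
        "analyzer_options": {
            "max_args": {"flag": "--max-args", "value": 5},  # assumed flag/value
        },
    },
}

# AnalyzerController.generate_pylint_options(filtered_smells) would return:
# ["--disable=all", "--max-args=5", "--enable=too-many-arguments"]
```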
+ + Args: + filtered_smells: Dictionary of smells enabled for custom analysis + + Returns: + list[tuple]: List of (checker_function, options_dict) pairs + """ + return [(smell["checker"], smell["analyzer_options"]) for smell in filtered_smells.values()] diff --git a/src/ecooptimizer/analyzers/ast_analyzer.py b/src/ecooptimizer/analyzers/ast_analyzer.py index e9c0b051..d2de1cea 100644 --- a/src/ecooptimizer/analyzers/ast_analyzer.py +++ b/src/ecooptimizer/analyzers/ast_analyzer.py @@ -1,22 +1,37 @@ +"""AST-based code analysis framework for detecting code smells.""" + from typing import Callable, Any from pathlib import Path from ast import AST, parse - -from .base_analyzer import Analyzer -from ..data_types.smell import Smell +from ecooptimizer.analyzers.base_analyzer import Analyzer +from ecooptimizer.data_types.smell import Smell class ASTAnalyzer(Analyzer): + """Analyzes Python source code using AST traversal to detect code smells. + + This analyzer executes multiple detection functions on a parsed AST and + aggregates their results. + """ + def analyze( self, file_path: Path, extra_options: list[tuple[Callable[[Path, AST], list[Smell]], dict[str, Any]]], - ): - smells_data: list[Smell] = [] + ) -> list[Smell]: + """Runs all configured detectors on the given source file. - source_code = file_path.read_text() + Args: + file_path: Path to the Python source file to analyze + extra_options: List of detector functions with their parameters, + each as a tuple (detector_function, params_dict) + Returns: + list[Smell]: Aggregated list of all smells found by all detectors + """ + smells_data: list[Smell] = [] + source_code = file_path.read_text() tree = parse(source_code) for detector, params in extra_options: diff --git a/src/ecooptimizer/analyzers/ast_analyzers/detect_long_element_chain.py b/src/ecooptimizer/analyzers/ast_analyzers/detect_long_element_chain.py index ae729adb..539dfc7a 100644 --- a/src/ecooptimizer/analyzers/ast_analyzers/detect_long_element_chain.py +++ b/src/ecooptimizer/analyzers/ast_analyzers/detect_long_element_chain.py @@ -1,10 +1,10 @@ import ast from pathlib import Path -from ...utils.smell_enums import CustomSmell +from ecooptimizer.utils.smell_enums import CustomSmell -from ...data_types.smell import LECSmell -from ...data_types.custom_fields import AdditionalInfo, Occurence +from ecooptimizer.data_types.smell import LECSmell +from ecooptimizer.data_types.custom_fields import AdditionalInfo, Occurence def detect_long_element_chain(file_path: Path, tree: ast.AST, threshold: int = 5) -> list[LECSmell]: diff --git a/src/ecooptimizer/analyzers/ast_analyzers/detect_long_lambda_expression.py b/src/ecooptimizer/analyzers/ast_analyzers/detect_long_lambda_expression.py index 2ff0fccb..9f49ca56 100644 --- a/src/ecooptimizer/analyzers/ast_analyzers/detect_long_lambda_expression.py +++ b/src/ecooptimizer/analyzers/ast_analyzers/detect_long_lambda_expression.py @@ -1,10 +1,10 @@ import ast from pathlib import Path -from ...utils.smell_enums import CustomSmell +from ecooptimizer.utils.smell_enums import CustomSmell -from ...data_types.smell import LLESmell -from ...data_types.custom_fields import AdditionalInfo, Occurence +from ecooptimizer.data_types.smell import LLESmell +from ecooptimizer.data_types.custom_fields import AdditionalInfo, Occurence def count_expressions(node: ast.expr) -> int: @@ -117,7 +117,9 @@ def check_lambda(node: ast.Lambda): # Convert the lambda function to a string and check its total length in characters lambda_code = get_lambda_code(node) if 
len(lambda_code) > threshold_length: - message = f"Lambda function too long ({len(lambda_code)} characters, max {threshold_length})" + message = ( + f"Lambda function too long ({len(lambda_code)} characters, max {threshold_length})" + ) smell = LLESmell( path=str(file_path), module=file_path.stem, diff --git a/src/ecooptimizer/analyzers/ast_analyzers/detect_long_message_chain.py b/src/ecooptimizer/analyzers/ast_analyzers/detect_long_message_chain.py index b3d59c73..514c0762 100644 --- a/src/ecooptimizer/analyzers/ast_analyzers/detect_long_message_chain.py +++ b/src/ecooptimizer/analyzers/ast_analyzers/detect_long_message_chain.py @@ -1,10 +1,10 @@ import ast from pathlib import Path -from ...utils.smell_enums import CustomSmell +from ecooptimizer.utils.smell_enums import CustomSmell -from ...data_types.smell import LMCSmell -from ...data_types.custom_fields import AdditionalInfo, Occurence +from ecooptimizer.data_types.smell import LMCSmell +from ecooptimizer.data_types.custom_fields import AdditionalInfo, Occurence def compute_chain_length(node: ast.expr) -> int: @@ -29,9 +29,7 @@ def compute_chain_length(node: ast.expr) -> int: return 0 -def detect_long_message_chain( - file_path: Path, tree: ast.AST, threshold: int = 5 -) -> list[LMCSmell]: +def detect_long_message_chain(file_path: Path, tree: ast.AST, threshold: int = 5) -> list[LMCSmell]: """ Detects long message chains in the given Python code. diff --git a/src/ecooptimizer/analyzers/ast_analyzers/detect_repeated_calls.py b/src/ecooptimizer/analyzers/ast_analyzers/detect_repeated_calls.py index 6764ad7b..c0cb3f88 100644 --- a/src/ecooptimizer/analyzers/ast_analyzers/detect_repeated_calls.py +++ b/src/ecooptimizer/analyzers/ast_analyzers/detect_repeated_calls.py @@ -3,9 +3,9 @@ from pathlib import Path import astor -from ...data_types.custom_fields import CRCInfo, Occurence -from ...data_types.smell import CRCSmell -from ...utils.smell_enums import CustomSmell +from ecooptimizer.data_types.custom_fields import CRCInfo, Occurence +from ecooptimizer.data_types.smell import CRCSmell +from ecooptimizer.utils.smell_enums import CustomSmell IGNORED_PRIMITIVE_BUILTINS = {"abs", "round"} # Built-ins safe to ignore when used with primitives diff --git a/src/ecooptimizer/analyzers/astroid_analyzer.py b/src/ecooptimizer/analyzers/astroid_analyzer.py index e2622c4d..54fc40d0 100644 --- a/src/ecooptimizer/analyzers/astroid_analyzer.py +++ b/src/ecooptimizer/analyzers/astroid_analyzer.py @@ -1,13 +1,20 @@ +"""Astroid-based code analysis framework for detecting code smells.""" + from typing import Callable, Any from pathlib import Path from astroid import nodes, parse - -from .base_analyzer import Analyzer -from ..data_types.smell import Smell +from ecooptimizer.analyzers.base_analyzer import Analyzer +from ecooptimizer.data_types.smell import Smell class AstroidAnalyzer(Analyzer): + """Analyzes Python source code using Astroid to detect code smells. + + This analyzer executes multiple detection functions on parsed Astroid nodes + and aggregates their results. + """ + def analyze( self, file_path: Path, @@ -17,11 +24,19 @@ def analyze( dict[str, Any], ] ], - ): - smells_data: list[Smell] = [] + ) -> list[Smell]: + """Runs all configured detectors on the given source file. 
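Both custom analyzers consume the same detector contract. A hypothetical checker illustrating the expected shape (the function, its logic, and the keyword expansion of the params dict are assumptions based on the signatures above and the built-in detectors):

```python
import ast
from pathlib import Path

from ecooptimizer.data_types.smell import Smell


def detect_deeply_nested_loops(file_path: Path, tree: ast.AST, threshold: int = 3) -> list[Smell]:
    """Hypothetical detector matching Callable[[Path, AST], list[Smell]]."""
    smells: list[Smell] = []
    # walk `tree` and append a Smell for each violation found in `file_path`
    return smells


# Paired with its options, in the form generate_custom_options produces:
extra_options = [(detect_deeply_nested_loops, {"threshold": 3})]
```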
- source_code = file_path.read_text() + Args: + file_path: Path to the Python source file to analyze + extra_options: List of detector functions with their parameters as + tuples of (detector_function, params_dict) + Returns: + list[Smell]: Combined list of all smells detected by all detectors + """ + smells_data: list[Smell] = [] + source_code = file_path.read_text() tree = parse(source_code) for detector, params in extra_options: diff --git a/src/ecooptimizer/analyzers/astroid_analyzers/detect_string_concat_in_loop.py b/src/ecooptimizer/analyzers/astroid_analyzers/detect_string_concat_in_loop.py index 05a8c125..cd1e15a5 100644 --- a/src/ecooptimizer/analyzers/astroid_analyzers/detect_string_concat_in_loop.py +++ b/src/ecooptimizer/analyzers/astroid_analyzers/detect_string_concat_in_loop.py @@ -3,10 +3,10 @@ from typing import Any from astroid import nodes, util, parse, extract_node, AttributeInferenceError -from ...config import CONFIG -from ...data_types.custom_fields import Occurence, SCLInfo -from ...data_types.smell import SCLSmell -from ...utils.smell_enums import CustomSmell +from ecooptimizer.config import CONFIG +from ecooptimizer.data_types.custom_fields import Occurence, SCLInfo +from ecooptimizer.data_types.smell import SCLSmell +from ecooptimizer.utils.smell_enums import CustomSmell logger = CONFIG["detectLogger"] @@ -355,7 +355,11 @@ def get_ordered_scope_nodes( ) -> list[nodes.NodeNG]: """Get all nodes in scope in execution order, flattening nested blocks.""" nodes_list = [] - for child in scope.body: + + if not hasattr(scope, "body"): + return [] + + for child in scope.body: # type: ignore # Recursively flatten block nodes (loops, ifs, etc) if child.lineno >= target.lineno: # type: ignore break diff --git a/src/ecooptimizer/analyzers/base_analyzer.py b/src/ecooptimizer/analyzers/base_analyzer.py index a20673f4..a89f77a1 100644 --- a/src/ecooptimizer/analyzers/base_analyzer.py +++ b/src/ecooptimizer/analyzers/base_analyzer.py @@ -1,12 +1,30 @@ +"""Abstract base class for all code smell analyzers.""" + from abc import ABC, abstractmethod from pathlib import Path from typing import Any - -from ..data_types.smell import Smell +from ecooptimizer.data_types.smell import Smell class Analyzer(ABC): + """Abstract base class defining the interface for code smell analyzers. + + Concrete analyzer implementations must implement the analyze() method. + """ + @abstractmethod def analyze(self, file_path: Path, extra_options: list[Any]) -> list[Smell]: + """Analyze a source file and return detected code smells. + + Args: + file_path: Path to the source file to analyze + extra_options: List of analyzer-specific configuration options + + Returns: + list[Smell]: Detected code smells in the source file + + Note: + Concrete analyzer implementations must override this method. 
+ """ pass diff --git a/src/ecooptimizer/analyzers/pylint_analyzer.py b/src/ecooptimizer/analyzers/pylint_analyzer.py index e11f2e22..d6d615ad 100644 --- a/src/ecooptimizer/analyzers/pylint_analyzer.py +++ b/src/ecooptimizer/analyzers/pylint_analyzer.py @@ -1,20 +1,29 @@ +"""Pylint-based analyzer for detecting code smells.""" + from io import StringIO import json from pathlib import Path from pylint.lint import Run from pylint.reporters.json_reporter import JSON2Reporter -from ..config import CONFIG +from ecooptimizer.config import CONFIG +from ecooptimizer.data_types.custom_fields import AdditionalInfo, Occurence +from ecooptimizer.analyzers.base_analyzer import Analyzer +from ecooptimizer.data_types.smell import Smell + -from ..data_types.custom_fields import AdditionalInfo, Occurence +class PylintAnalyzer(Analyzer): + """Analyzer that detects code smells using Pylint.""" -from .base_analyzer import Analyzer -from ..data_types.smell import Smell + def _build_smells(self, pylint_smells: dict) -> list[Smell]: # type: ignore + """Convert Pylint JSON output to Eco Optimizer smell objects. + Args: + pylint_smells: Dictionary of smells from Pylint JSON report -class PylintAnalyzer(Analyzer): - def _build_smells(self, pylint_smells: dict): # type: ignore - """Casts initial list of pylint smells to the Eco Optimizer's Smell configuration.""" + Returns: + list[Smell]: List of converted smell objects + """ smells: list[Smell] = [] for smell in pylint_smells: @@ -42,9 +51,21 @@ def _build_smells(self, pylint_smells: dict): # type: ignore return smells - def analyze(self, file_path: Path, extra_options: list[str]): + def analyze(self, file_path: Path, extra_options: list[str]) -> list[Smell]: + """Run Pylint analysis on a source file and return detected smells. + + Args: + file_path: Path to the source file to analyze + extra_options: Additional Pylint command-line options + + Returns: + list[Smell]: Detected code smells + + Note: + Catches and logs Pylint execution and JSON parsing errors + """ smells_data: list[Smell] = [] - pylint_options = [str(file_path), *extra_options] + pylint_options = [str(file_path), *extra_options, "--clear-cache-post-run=True"] with StringIO() as buffer: reporter = JSON2Reporter(buffer) @@ -54,8 +75,8 @@ def analyze(self, file_path: Path, extra_options: list[str]): buffer.seek(0) smells_data.extend(self._build_smells(json.loads(buffer.getvalue())["messages"])) except json.JSONDecodeError as e: - CONFIG["detectLogger"].error(f"❌ Failed to parse JSON output from pylint: {e}") # type: ignore + CONFIG["detectLogger"].error(f"❌ Failed to parse JSON output from pylint: {e}") except Exception as e: - CONFIG["detectLogger"].error(f"❌ An error occurred during pylint analysis: {e}") # type: ignore + CONFIG["detectLogger"].error(f"❌ An error occurred during pylint analysis: {e}") return smells_data diff --git a/src/ecooptimizer/api/__main__.py b/src/ecooptimizer/api/__main__.py index aa1f1713..08bb0e6d 100644 --- a/src/ecooptimizer/api/__main__.py +++ b/src/ecooptimizer/api/__main__.py @@ -1,14 +1,25 @@ +"""Application entry point and server configuration for EcoOptimizer.""" + import logging import sys import uvicorn -from .app import app - -from ..config import CONFIG +from ecooptimizer.api.app import app +from ecooptimizer.config import CONFIG class HealthCheckFilter(logging.Filter): + """Filters out health check requests from access logs.""" + def filter(self, record: logging.LogRecord) -> bool: + """Determines if a log record should be filtered. 
+ + Args: + record: The log record to evaluate + + Returns: + bool: False if record contains health check, True otherwise + """ return "/health" not in record.getMessage() @@ -17,7 +28,11 @@ def filter(self, record: logging.LogRecord) -> bool: def start(): - # ANSI codes + """Starts the Uvicorn server with configured settings. + + Displays startup banner and handles different run modes. + """ + # ANSI color codes RESET = "\u001b[0m" BLUE = "\u001b[36m" PURPLE = "\u001b[35m" @@ -25,14 +40,16 @@ def start(): mode_message = f"{CONFIG['mode'].upper()} MODE" msg_len = len(mode_message) - print(f"\n\t\t\t***{'*'*msg_len}***") + print(f"\n\t\t\t***{'*' * msg_len}***") print(f"\t\t\t* {BLUE}{mode_message}{RESET} *") - print(f"\t\t\t***{'*'*msg_len}***\n") + print(f"\t\t\t***{'*' * msg_len}***\n") + if CONFIG["mode"] == "production": print(f"{PURPLE}hint: add --dev flag at the end to ignore energy checks\n") logging.info("🚀 Running EcoOptimizer Application...") logging.info(f"{'=' * 100}\n") + uvicorn.run( app, host="127.0.0.1", @@ -44,11 +61,13 @@ def start(): def main(): + """Main entry point that sets mode based on command line arguments.""" CONFIG["mode"] = "development" if "--dev" in sys.argv else "production" start() def dev(): + """Development mode entry point that bypasses energy checks.""" CONFIG["mode"] = "development" start() diff --git a/src/ecooptimizer/api/app.py b/src/ecooptimizer/api/app.py index bace8451..b5c5aa4e 100644 --- a/src/ecooptimizer/api/app.py +++ b/src/ecooptimizer/api/app.py @@ -1,15 +1,31 @@ +"""Main FastAPI application setup and health check endpoint.""" + from fastapi import FastAPI -from .routes import RefactorRouter, DetectRouter, LogRouter +from ecooptimizer.api.error_handler import AppError, global_error_handler +from ecooptimizer.api.routes import RefactorRouter, DetectRouter, LogRouter + +app = FastAPI( + title="Ecooptimizer", + description="API for detecting and refactoring energy-inefficient Python code", +) -app = FastAPI(title="Ecooptimizer") -# Include API routes -app.include_router(RefactorRouter) -app.include_router(DetectRouter) -app.include_router(LogRouter) +# Register handlers for all exception types +app.add_exception_handler(AppError, global_error_handler) +app.add_exception_handler(Exception, global_error_handler) + +# Register all API routers +app.include_router(RefactorRouter, tags=["refactoring"]) +app.include_router(DetectRouter, tags=["detection"]) +app.include_router(LogRouter, tags=["logging"]) @app.get("/health") async def ping(): + """Check if the API service is running. + + Returns: + dict: Simple status response {'status': 'ok'} + """ return {"status": "ok"} diff --git a/src/ecooptimizer/api/error_handler.py b/src/ecooptimizer/api/error_handler.py new file mode 100644 index 00000000..e29b0d56 --- /dev/null +++ b/src/ecooptimizer/api/error_handler.py @@ -0,0 +1,94 @@ +# ecooptimizer/api/error_handler.py +import logging +import os +import stat +import traceback + +from fastapi import Request +from fastapi.responses import JSONResponse + +from ecooptimizer.config import CONFIG + + +class AppError(Exception): + """Base class for all application errors.""" + + def __init__(self, message: str, status_code: int = 500): + self.message = message + self.status_code = status_code + super().__init__(message) + + +class EnergySavingsError(AppError): + """Raised when energy savings validation fails.""" + + def __init__(self): + message = "Energy was not saved after refactoring." 
+ super().__init__(message, 400) + + +class EnergyMeasurementError(AppError): + """Raised when energy measurement fails.""" + + def __init__(self, file_path: str): + message = f"Could not retrieve emissions of {file_path}." + super().__init__(message, 400) + + +class RefactoringError(AppError): + """Raised when refactoring fails.""" + + pass + + +class RessourceNotFoundError(AppError): + """Raised when a resource (file or folder) cannot be found.""" + + def __init__(self, path: str, ressourceType: str): + message = f"{ressourceType.capitalize()} not found: {path}." + super().__init__(message, 404) + + +def get_route_logger(request: Request): + """Determine which logger to use based on route path.""" + route_path = request.url.path + if "/detect" in route_path.lower(): + return CONFIG["detectLogger"] + elif "/refactor" in route_path.lower(): + return CONFIG["refactorLogger"] + return logging.getLogger() + + +async def global_error_handler(request: Request, e: Exception) -> JSONResponse: + logger = get_route_logger(request) + + if isinstance(e, AppError): + logger.error(f"Application error at {request.url.path}: {e.message}") + return JSONResponse( + status_code=e.status_code, + content={"detail": e.message}, + ) + else: + logger.error( + f"Unexpected error at {request.url.path}\n" + f"{''.join(traceback.format_exception(type(e), e, e.__traceback__))}" + ) + return JSONResponse( + status_code=500, + content={"detail": "Internal server error"}, + ) + + +def remove_readonly(func, path, _) -> None: # noqa: ANN001 + """Removes readonly attribute from files/directories to enable deletion. + + Args: + func: Original removal function that failed + path: Path to the file/directory + _: Unused excinfo parameter + + Note: + Used as error handler for shutil.rmtree() + """ + os.chmod(path, stat.S_IWRITE) # noqa: PTH101 + func(path) diff --git a/src/ecooptimizer/api/routes/__init__.py b/src/ecooptimizer/api/routes/__init__.py index b0b59465..99e0370c 100644 --- a/src/ecooptimizer/api/routes/__init__.py +++ b/src/ecooptimizer/api/routes/__init__.py @@ -1,5 +1,5 @@ -from .refactor_smell import router as RefactorRouter -from .detect_smells import router as DetectRouter -from .show_logs import router as LogRouter +from ecooptimizer.api.routes.refactor_smell import router as RefactorRouter +from ecooptimizer.api.routes.detect_smells import router as DetectRouter +from ecooptimizer.api.routes.show_logs import router as LogRouter __all__ = ["DetectRouter", "LogRouter", "RefactorRouter"] diff --git a/src/ecooptimizer/api/routes/detect_smells.py b/src/ecooptimizer/api/routes/detect_smells.py index fb86357c..13b3c7f4 100644 --- a/src/ecooptimizer/api/routes/detect_smells.py +++ b/src/ecooptimizer/api/routes/detect_smells.py @@ -1,66 +1,70 @@ +"""API endpoint for detecting code smells in Python files.""" + # pyright: reportOptionalMemberAccess=false from pathlib import Path -from fastapi import APIRouter, HTTPException +from fastapi import APIRouter from pydantic import BaseModel import time -from ...config import CONFIG +from ecooptimizer.api.error_handler import AppError, RessourceNotFoundError -from ...analyzers.analyzer_controller import AnalyzerController -from ...data_types.smell import Smell +from ecooptimizer.config import CONFIG +from ecooptimizer.analyzers.analyzer_controller import AnalyzerController +from ecooptimizer.data_types.smell import Smell router = APIRouter() - analyzer_controller = AnalyzerController() class SmellRequest(BaseModel): + """Request model for smell detection endpoint. 
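Concretely, clients of the detection route now send per-smell option objects rather than a flat list. An illustrative call (the host, port, file path, and option values are assumptions, and `requests` stands in for whatever client the extension uses):

```python
import requests  # assumed client library; the VS Code extension may differ

payload = {
    "file_path": "/home/user/project/sample.py",
    "enabled_smells": {
        "cached-repeated-calls": {"threshold": 2},
        "too-many-arguments": {"max_args": 5},
    },
}
response = requests.post("http://127.0.0.1:8000/smells", json=payload)
response.raise_for_status()
smells = response.json()  # serialized list[Smell]
```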
+ + Attributes: + file_path: Path to the Python file to analyze + enabled_smells: Dictionary mapping smell names to their configurations + """ + file_path: str - enabled_smells: list[str] + enabled_smells: dict[str, dict[str, int | str]] -@router.post("/smells", response_model=list[Smell]) -def detect_smells(request: SmellRequest): - """ - Detects code smells in a given file, logs the process, and measures execution time. - """ +@router.post("/smells", response_model=list[Smell], summary="Detect code smells") +def detect_smells(request: SmellRequest) -> list[Smell]: + """Analyzes a Python file and returns detected code smells. + + Args: + request: SmellRequest containing file path and smell configurations + + Returns: + list[Smell]: Detected code smells with their metadata + Raises: + HTTPException: 404 if file not found, 500 for analysis errors + """ CONFIG["detectLogger"].info(f"{'=' * 100}") CONFIG["detectLogger"].info(f"📂 Received smell detection request for: {request.file_path}") start_time = time.time() - try: - file_path_obj = Path(request.file_path) - - if not file_path_obj.exists(): - CONFIG["detectLogger"].error(f"❌ File does not exist: {file_path_obj}") - raise FileNotFoundError(f"File not found: {file_path_obj}") + file_path_obj = Path(request.file_path) - CONFIG["detectLogger"].debug( - f"🔎 Enabled smells: {', '.join(request.enabled_smells) if request.enabled_smells else 'None'}" - ) + if not file_path_obj.exists(): + CONFIG["detectLogger"].error(f"❌ File does not exist: {file_path_obj}") + raise RessourceNotFoundError(str(file_path_obj), "file") - # Run analysis + try: CONFIG["detectLogger"].info(f"🎯 Running analysis on: {file_path_obj}") smells_data = analyzer_controller.run_analysis(file_path_obj, request.enabled_smells) + except AppError as e: + raise AppError(str(e), e.status_code) from e + except Exception as e: + raise Exception(str(e)) from e - execution_time = round(time.time() - start_time, 2) - CONFIG["detectLogger"].info(f"📊 Execution Time: {execution_time} seconds") - - CONFIG["detectLogger"].info( - f"🏁 Analysis completed for {file_path_obj}. {len(smells_data)} smells found." - ) - CONFIG["detectLogger"].info(f"{'=' * 100}\n") - - return smells_data - - except FileNotFoundError as e: - CONFIG["detectLogger"].error(f"❌ File not found: {e}") - CONFIG["detectLogger"].info(f"{'=' * 100}\n") - raise HTTPException(status_code=404, detail=str(e)) from e + execution_time = round(time.time() - start_time, 2) + CONFIG["detectLogger"].info(f"📊 Execution Time: {execution_time} seconds") + CONFIG["detectLogger"].info( + f"🏁 Analysis completed for {file_path_obj}. {len(smells_data)} smells found." 
+ ) + CONFIG["detectLogger"].info(f"{'=' * 100}\n") - except Exception as e: - CONFIG["detectLogger"].error(f"❌ Error during smell detection: {e!s}") - CONFIG["detectLogger"].info(f"{'=' * 100}\n") - raise HTTPException(status_code=500, detail="Internal server error") from e + return smells_data diff --git a/src/ecooptimizer/api/routes/refactor_smell.py b/src/ecooptimizer/api/routes/refactor_smell.py index 799700a5..2eb6e1e5 100644 --- a/src/ecooptimizer/api/routes/refactor_smell.py +++ b/src/ecooptimizer/api/routes/refactor_smell.py @@ -1,34 +1,59 @@ +"""API endpoints for code refactoring with energy measurement.""" + # pyright: reportOptionalMemberAccess=false import shutil -import math from pathlib import Path from tempfile import mkdtemp import traceback -from fastapi import APIRouter, HTTPException +from fastapi import APIRouter from pydantic import BaseModel -from typing import Any, Optional - -from ...config import CONFIG -from ...analyzers.analyzer_controller import AnalyzerController -from ...exceptions import EnergySavingsError, RefactoringError, remove_readonly -from ...refactorers.refactorer_controller import RefactorerController -from ...measurements.codecarbon_energy_meter import CodeCarbonEnergyMeter -from ...data_types.smell import Smell +from typing import Optional + +from ecooptimizer.api.error_handler import ( + AppError, + EnergyMeasurementError, + EnergySavingsError, + RefactoringError, + RessourceNotFoundError, + remove_readonly, +) + +from ecooptimizer.config import CONFIG +from ecooptimizer.refactorers.refactorer_controller import RefactorerController +from ecooptimizer.analyzers.analyzer_controller import AnalyzerController +from ecooptimizer.measurements.codecarbon_energy_meter import CodeCarbonEnergyMeter +from ecooptimizer.data_types.smell import Smell logger = CONFIG["refactorLogger"] router = APIRouter() -analyzer_controller = AnalyzerController() refactorer_controller = RefactorerController() +analyzer_controller = AnalyzerController() energy_meter = CodeCarbonEnergyMeter() class ChangedFile(BaseModel): + """Tracks file changes during refactoring. + + Attributes: + original: Path to original file + refactored: Path to refactored file + """ + original: str refactored: str class RefactoredData(BaseModel): + """Contains results of a refactoring operation. + + Attributes: + tempDir: Temporary directory with refactored files + targetFile: Main file that was refactored + energySaved: Estimated energy savings in kg CO2 + affectedFiles: List of all files modified during refactoring + """ + tempDir: str targetFile: ChangedFile energySaved: Optional[float] = None @@ -36,151 +61,241 @@ class RefactoredData(BaseModel): class RefactorRqModel(BaseModel): - source_dir: str + """Request model for single smell refactoring. + + Attributes: + sourceDir: Directory containing code to refactor + smell: Smell to refactor + """ + + sourceDir: str smell: Smell -class RefactorResModel(BaseModel): - refactoredData: Optional[RefactoredData] = None - updatedSmells: list[Smell] +class RefactorTypeRqModel(BaseModel): + """Request model for refactoring by smell type. 
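For orientation, a successful response from either refactor route serializes along these lines (all values invented for illustration):

```python
example_response = {
    "tempDir": "/tmp/ecooptimizer-abc123",
    "targetFile": {
        "original": "/home/user/project/sample.py",
        "refactored": "/tmp/ecooptimizer-abc123/project/sample.py",
    },
    "energySaved": 1.7e-06,  # kg CO2: initial minus final emissions
    "affectedFiles": [
        {
            "original": "/home/user/project/sample.py",
            "refactored": "/tmp/ecooptimizer-abc123/project/sample.py",
        }
    ],
}
```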
+ + Attributes: + sourceDir: Directory containing code to refactor + smellType: Type of smell to refactor + firstSmell: First instance of the smell to refactor + """ + + sourceDir: str + smellType: str + firstSmell: Smell + +@router.post("/refactor", response_model=RefactoredData, summary="Refactor a specific code smell") +def refactor(request: RefactorRqModel) -> RefactoredData | None: + """Refactors a specific code smell and measures energy impact. -@router.post("/refactor", response_model=RefactorResModel) -def refactor(request: RefactorRqModel): - """Handles the refactoring process for a given smell.""" + Args: + request: Contains source directory and smell to refactor + + Returns: + RefactoredData: Results including energy savings and changed files + None: If refactoring fails + + Raises: + HTTPException: Various error cases with appropriate status codes + """ logger.info(f"{'=' * 100}") - logger.info("🔄 Received refactor request.") + + source_dir = Path(request.sourceDir) + target_file = Path(request.smell.path) + + logger.info(f"🔄 Refactoring smell: {request.smell.symbol} in {source_dir!s}") + + if not target_file.exists(): + raise RessourceNotFoundError(str(target_file), "file") + + if not source_dir.is_dir(): + raise RessourceNotFoundError(str(source_dir), "folder") try: - logger.info(f"🔍 Analyzing smell: {request.smell.symbol} in {request.source_dir}") - refactor_data, updated_smells = perform_refactoring(Path(request.source_dir), request.smell) + initial_emissions = measure_energy(target_file) + if not initial_emissions: + logger.error("❌ Could not retrieve initial emissions.") + raise EnergyMeasurementError(str(target_file)) - logger.info(f"✅ Refactoring process completed. Updated smells: {len(updated_smells)}") + logger.info(f"📊 Initial emissions: {initial_emissions} kg CO2") + refactor_data = perform_refactoring(source_dir, request.smell, initial_emissions) if refactor_data: - refactor_data = clean_refactored_data(refactor_data) logger.info(f"{'=' * 100}\n") - return RefactorResModel(refactoredData=refactor_data, updatedSmells=updated_smells) + return refactor_data logger.info(f"{'=' * 100}\n") - return RefactorResModel(updatedSmells=updated_smells) - - except OSError as e: - logger.error(f"❌ OS error: {e!s}") - raise HTTPException(status_code=404, detail=str(e)) from e + except AppError as e: + raise AppError(str(e), e.status_code) from e except Exception as e: - logger.error(f"❌ Refactoring error: {e!s}") - logger.info(f"{'=' * 100}\n") - raise HTTPException(status_code=400, detail=str(e)) from e + raise Exception(str(e)) from e -def perform_refactoring(source_dir: Path, smell: Smell): - """Executes the refactoring process for a given smell.""" - target_file = Path(smell.path) +@router.post( + "/refactor-by-type", response_model=RefactoredData, summary="Refactor all smells of a type" +) +def refactorSmell(request: RefactorTypeRqModel) -> RefactoredData: + """Refactors all instances of a smell type in a file. 
- logger.info( - f"🚀 Starting refactoring for {smell.symbol} at line {smell.occurences[0].line} in {target_file}" - ) + Args: + request: Contains source directory, smell type and first instance - if not source_dir.is_dir(): - logger.error(f"❌ Directory does not exist: {source_dir}") - raise OSError(f"Directory {source_dir} does not exist.") + Returns: + RefactoredData: Aggregated results of all refactorings + + Raises: + HTTPException: Various error cases with appropriate status codes + """ + logger.info(f"{'=' * 100}") + source_dir = Path(request.sourceDir) + target_file = Path(request.firstSmell.path) + + logger.info(f"🔄 Refactoring smell: {request.firstSmell.symbol} in {source_dir!s}") - initial_emissions = measure_energy(target_file) + if not target_file.exists(): + raise RessourceNotFoundError(str(target_file), "file") - if not initial_emissions: - logger.error("❌ Could not retrieve initial emissions.") - raise RuntimeError("Could not retrieve initial emissions.") + if not source_dir.is_dir(): + raise RessourceNotFoundError(str(source_dir), "folder") + try: + initial_emissions = measure_energy(target_file) + if not initial_emissions: + raise EnergyMeasurementError("Could not retrieve initial emissions.") + logger.info(f"📊 Initial emissions: {initial_emissions} kg CO2") + + total_energy_saved = 0.0 + all_affected_files: list[ChangedFile] = [] + temp_dir = None + current_smell = request.firstSmell + current_source_dir = source_dir + + refactor_data = perform_refactoring(current_source_dir, current_smell, initial_emissions) + total_energy_saved += refactor_data.energySaved or 0.0 + all_affected_files.extend(refactor_data.affectedFiles) + + temp_dir = refactor_data.tempDir + target_file = refactor_data.targetFile + refactored_file_path = target_file.refactored + source_copy_dir = Path(temp_dir) / source_dir.name + + while True: + next_smells = analyzer_controller.run_analysis( + Path(refactored_file_path), [request.smellType] + ) + if not next_smells: + break + current_smell = next_smells[0] + step_data = perform_refactoring( + source_copy_dir, + current_smell, + initial_emissions - total_energy_saved, + Path(temp_dir), + ) + total_energy_saved += step_data.energySaved or 0.0 + all_affected_files.extend(step_data.affectedFiles) + + logger.info(f"✅ Total energy saved: {total_energy_saved} kg CO2") - logger.info(f"📊 Initial emissions: {initial_emissions} kg CO2") + return RefactoredData( + tempDir=temp_dir, + targetFile=target_file, + energySaved=total_energy_saved, + affectedFiles=list({file.original: file for file in all_affected_files}.values()), + ) + except AppError as e: + raise AppError(str(e), e.status_code) from e + except Exception as e: + raise Exception(str(e)) from e + + +def perform_refactoring( + source_dir: Path, + smell: Smell, + initial_emissions: float, + existing_temp_dir: Optional[Path] = None, +) -> RefactoredData: + """Executes the refactoring process and measures energy impact. 
+ + Args: + source_dir: Source directory to refactor + smell: Smell to refactor + initial_emissions: Baseline energy measurement + existing_temp_dir: Optional existing temp directory to use + + Returns: + RefactoredData: Results of the refactoring operation + + Raises: + EnergyMeasurementError: If energy measurement fails + EnergySavingsError: If refactoring doesn't save energy + RefactoringError: If refactoring fails + """ + target_file = Path(smell.path) - temp_dir = mkdtemp(prefix="ecooptimizer-") - source_copy = Path(temp_dir) / source_dir.name - target_file_copy = Path(str(target_file).replace(str(source_dir), str(source_copy), 1)) + logger.info( + f"🚀 Starting refactoring for {smell.symbol} at line {smell.occurences[0].line} in {target_file}" + ) - shutil.copytree(source_dir, source_copy, ignore=shutil.ignore_patterns(".git*")) + if existing_temp_dir is None: + temp_dir = Path(mkdtemp(prefix="ecooptimizer-")) + source_copy = temp_dir / source_dir.name + shutil.copytree(source_dir, source_copy, ignore=shutil.ignore_patterns(".git*")) + else: + temp_dir = existing_temp_dir + source_copy = source_dir + target_file_copy = source_copy / target_file.relative_to(source_dir) modified_files = [] try: modified_files: list[Path] = refactorer_controller.run_refactorer( target_file_copy, source_copy, smell ) - except NotImplementedError: - print("Not implemented yet.") except Exception as e: - print(f"An unexpected error occured: {e!s}") + shutil.rmtree(temp_dir, onerror=remove_readonly) # type: ignore traceback.print_exc() - shutil.rmtree(temp_dir, onerror=remove_readonly) - raise RefactoringError(str(target_file), str(e)) from e + raise RefactoringError(str(e)) from e final_emissions = measure_energy(target_file_copy) if not final_emissions: - print("❌ Could not retrieve final emissions. Discarding refactoring.") - - logger.error("❌ Could not retrieve final emissions. Discarding refactoring.") - - shutil.rmtree(temp_dir, onerror=remove_readonly) - raise RuntimeError("Could not retrieve final emissions.") + if existing_temp_dir is None: + shutil.rmtree(temp_dir, onerror=remove_readonly) # type: ignore + raise EnergyMeasurementError(str(target_file)) if CONFIG["mode"] == "production" and final_emissions >= initial_emissions: - logger.info(f"📊 Final emissions: {final_emissions} kg CO2") - logger.info("⚠️ No measured energy savings. Discarding refactoring.") - - print("❌ Could not retrieve final emissions. Discarding refactoring.") - - shutil.rmtree(temp_dir, onerror=remove_readonly) - raise EnergySavingsError(str(target_file), "Energy was not saved after refactoring.") + if existing_temp_dir is None: + shutil.rmtree(temp_dir, onerror=remove_readonly) # type: ignore + raise EnergySavingsError() - logger.info(f"✅ Energy saved! 
Initial: {initial_emissions}, Final: {final_emissions}") - - refactor_data = { - "tempDir": temp_dir, - "targetFile": { - "original": str(target_file.resolve()), - "refactored": str(target_file_copy.resolve()), - }, - "energySaved": initial_emissions - final_emissions - if not math.isnan(initial_emissions - final_emissions) - else None, - "affectedFiles": [ - { - "original": str(file.resolve()).replace( - str(source_copy.resolve()), str(source_dir.resolve()) - ), - "refactored": str(file.resolve()), - } + if existing_temp_dir is None: + shutil.rmtree(temp_dir, onerror=remove_readonly) # type: ignore + raise EnergySavingsError() + + energy_saved = initial_emissions - final_emissions + return RefactoredData( + tempDir=str(temp_dir), + targetFile=ChangedFile( + original=str(target_file.resolve()), + refactored=str(target_file_copy.resolve()), + ), + energySaved=energy_saved, + affectedFiles=[ + ChangedFile( + original=str(file.resolve()).replace(str(source_copy), str(source_dir)), + refactored=str(file.resolve()), + ) for file in modified_files ], - } + ) - updated_smells = analyzer_controller.run_analysis(target_file_copy) - return refactor_data, updated_smells +def measure_energy(file: Path) -> Optional[float]: + """Measures energy consumption of executing a file. -def measure_energy(file: Path): + Args: + file: Python file to measure + + Returns: + Optional[float]: Energy consumption in kg CO2, or None if measurement fails + """ energy_meter.measure_energy(file) return energy_meter.emissions - - -def clean_refactored_data(refactor_data: dict[str, Any]): - """Ensures the refactored data is correctly structured and handles missing fields.""" - try: - return RefactoredData( - tempDir=refactor_data.get("tempDir", ""), - targetFile=ChangedFile( - original=refactor_data["targetFile"].get("original", ""), - refactored=refactor_data["targetFile"].get("refactored", ""), - ), - energySaved=refactor_data.get("energySaved", None), - affectedFiles=[ - ChangedFile( - original=file.get("original", ""), - refactored=file.get("refactored", ""), - ) - for file in refactor_data.get("affectedFiles", []) - ], - ) - except KeyError as e: - logger.error(f"❌ Missing expected key in refactored data: {e}") - raise HTTPException(status_code=500, detail=f"Missing key: {e}") from e diff --git a/src/ecooptimizer/api/routes/show_logs.py b/src/ecooptimizer/api/routes/show_logs.py index 7e689978..54e6e225 100644 --- a/src/ecooptimizer/api/routes/show_logs.py +++ b/src/ecooptimizer/api/routes/show_logs.py @@ -1,5 +1,6 @@ -# pyright: reportOptionalMemberAccess=false +"""WebSocket endpoints for real-time log streaming.""" +# pyright: reportOptionalMemberAccess=false import asyncio from pathlib import Path import re @@ -7,46 +8,73 @@ from fastapi.websockets import WebSocketState, WebSocket, WebSocketDisconnect from pydantic import BaseModel -from ...utils.output_manager import LoggingManager -from ...config import CONFIG +from ecooptimizer.utils.output_manager import LoggingManager +from ecooptimizer.config import CONFIG router = APIRouter() class LogInit(BaseModel): + """Request model for initializing logging. + + Attributes: + log_dir: Directory path where logs should be stored + """ + log_dir: str -@router.post("/logs/init") -def initialize_logs(log_init: LogInit): +@router.post("/logs/init", summary="Initialize logging system") +def initialize_logs(log_init: LogInit) -> dict[str, str]: + """Initializes the logging manager and configures application loggers. 
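The log routes below are meant to be used together: POST `/logs/init` once, then attach to the websocket streams. A minimal client sketch, assuming the host from `api/__main__.py`, an arbitrary port, and the third-party `websockets` package (not a project dependency):

```python
import asyncio

import requests
import websockets


async def tail_main_log() -> None:
    # Initialize the LoggingManager before opening a stream.
    requests.post(
        "http://127.0.0.1:8000/logs/init",
        json={"log_dir": "/tmp/ecooptimizer-logs"},
    ).raise_for_status()

    async with websockets.connect("ws://127.0.0.1:8000/logs/main") as ws:
        while True:
            print(await ws.recv())  # assumed framing: one log line per message


asyncio.run(tail_main_log())
```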
+ + Args: + log_init: Contains the log directory path + + Returns: + dict: Success message + + Raises: + WebSocketException: If initialization fails + """ try: loggingManager = LoggingManager(Path(log_init.log_dir), CONFIG["mode"] == "production") CONFIG["loggingManager"] = loggingManager CONFIG["detectLogger"] = loggingManager.loggers["detect"] CONFIG["refactorLogger"] = loggingManager.loggers["refactor"] - return {"message": "Logging initialized succesfully."} + return {"message": "Logging initialized successfully."} except Exception as e: raise WebSocketException(code=500, reason=str(e)) from e @router.websocket("/logs/main") -async def websocket_main_logs(websocket: WebSocket): +async def websocket_main_logs(websocket: WebSocket) -> None: + """WebSocket endpoint for streaming main application logs.""" await websocket_log_stream(websocket, CONFIG["loggingManager"].log_files["main"]) @router.websocket("/logs/detect") -async def websocket_detect_logs(websocket: WebSocket): +async def websocket_detect_logs(websocket: WebSocket) -> None: + """WebSocket endpoint for streaming code detection logs.""" await websocket_log_stream(websocket, CONFIG["loggingManager"].log_files["detect"]) @router.websocket("/logs/refactor") -async def websocket_refactor_logs(websocket: WebSocket): +async def websocket_refactor_logs(websocket: WebSocket) -> None: + """WebSocket endpoint for streaming code refactoring logs.""" await websocket_log_stream(websocket, CONFIG["loggingManager"].log_files["refactor"]) -async def listen_for_disconnect(websocket: WebSocket): - """Listens for client disconnects.""" +async def listen_for_disconnect(websocket: WebSocket) -> None: + """Background task to monitor WebSocket connection state. + + Args: + websocket: The WebSocket connection to monitor + + Raises: + WebSocketDisconnect: When client disconnects + """ try: while True: await websocket.receive() @@ -60,8 +88,17 @@ async def listen_for_disconnect(websocket: WebSocket): print(f"Unexpected error in listener: {e}") -async def websocket_log_stream(websocket: WebSocket, log_file: Path): - """Streams log file content via WebSocket.""" +async def websocket_log_stream(websocket: WebSocket, log_file: Path) -> None: + """Streams log file content to WebSocket client in real-time. + + Args: + websocket: Active WebSocket connection + log_file: Path to the log file to stream + + Note: + Only streams INFO, WARNING, ERROR, and CRITICAL level messages + Automatically handles client disconnects + """ await websocket.accept() # Start background task to listen for disconnect diff --git a/src/ecooptimizer/config.py b/src/ecooptimizer/config.py index af693926..3c8a90fc 100644 --- a/src/ecooptimizer/config.py +++ b/src/ecooptimizer/config.py @@ -1,17 +1,29 @@ +"""Application configuration settings and type definitions.""" + from logging import Logger import logging from typing import TypedDict -from .utils.output_manager import LoggingManager +from ecooptimizer.utils.output_manager import LoggingManager class Config(TypedDict): + """Type definition for application configuration dictionary. 
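Because `CONFIG` is a module-level singleton, the entry points and routes mutate it in place rather than passing it around; for example:

```python
from ecooptimizer.config import CONFIG

CONFIG["mode"] = "development"   # what the dev entry point does
logger = CONFIG["detectLogger"]  # rebound by /logs/init at runtime
logger.info("analysis starting")
```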
+ + Attributes: + mode: Current application mode ('production' or 'development') + loggingManager: Central logging manager instance + detectLogger: Logger for code detection operations + refactorLogger: Logger for code refactoring operations + """ + mode: str loggingManager: LoggingManager | None detectLogger: Logger refactorLogger: Logger +# Global application configuration CONFIG: Config = { "mode": "production", "loggingManager": None, diff --git a/src/ecooptimizer/data_types/__init__.py b/src/ecooptimizer/data_types/__init__.py index 1c130bb6..1a41f425 100644 --- a/src/ecooptimizer/data_types/__init__.py +++ b/src/ecooptimizer/data_types/__init__.py @@ -1,11 +1,11 @@ -from .custom_fields import ( +from ecooptimizer.data_types.custom_fields import ( AdditionalInfo, CRCInfo, Occurence, SCLInfo, ) -from .smell import ( +from ecooptimizer.data_types.smell import ( Smell, CRCSmell, SCLSmell, diff --git a/src/ecooptimizer/data_types/custom_fields.py b/src/ecooptimizer/data_types/custom_fields.py index f57000f8..4d6b2dd4 100644 --- a/src/ecooptimizer/data_types/custom_fields.py +++ b/src/ecooptimizer/data_types/custom_fields.py @@ -1,8 +1,19 @@ +"""Data models for code smell occurrences and additional metadata.""" + from typing import Optional from pydantic import BaseModel class Occurence(BaseModel): + """Tracks the location of a code smell in source files. + + Attributes: + line: Starting line number + endLine: Ending line number (optional) + column: Starting column number + endColumn: Ending column number (optional) + """ + line: int endLine: int | None column: int @@ -10,6 +21,15 @@ class Occurence(BaseModel): class AdditionalInfo(BaseModel): + """Base model for storing optional metadata about code smells. + + Attributes: + innerLoopLine: Line number of inner loop (if applicable) + concatTarget: Target of string concatenation (if applicable) + repetitions: Number of repetitions (if applicable) + callString: Function call string (if applicable) + """ + innerLoopLine: Optional[int] = None concatTarget: Optional[str] = None repetitions: Optional[int] = None @@ -17,10 +37,24 @@ class AdditionalInfo(BaseModel): class CRCInfo(AdditionalInfo): + """Extended metadata for Cache-Related Computations (CRC) smells. + + Attributes: + callString: Required function call string + repetitions: Required number of repetitions + """ + callString: str # type: ignore repetitions: int # type: ignore class SCLInfo(AdditionalInfo): + """Extended metadata for String Concatenation in Loops (SCL) smells. + + Attributes: + innerLoopLine: Required inner loop line number + concatTarget: Required concatenation target string + """ + innerLoopLine: int # type: ignore concatTarget: str # type: ignore diff --git a/src/ecooptimizer/data_types/smell.py b/src/ecooptimizer/data_types/smell.py index a12401ce..7275a2a2 100644 --- a/src/ecooptimizer/data_types/smell.py +++ b/src/ecooptimizer/data_types/smell.py @@ -1,26 +1,29 @@ +"""Data models for representing different types of code smells.""" + from pydantic import BaseModel from typing import Optional -from .custom_fields import CRCInfo, Occurence, AdditionalInfo, SCLInfo +from ecooptimizer.data_types.custom_fields import CRCInfo, Occurence, AdditionalInfo, SCLInfo class Smell(BaseModel): - """ - Represents a code smell detected in a source file, including its location, type, and related metadata. + """Base model representing a detected code smell. Attributes: - confidence (str): The level of confidence for the smell detection (e.g., "high", "medium", "low"). 
- message (str): A descriptive message explaining the nature of the smell. - messageId (str): A unique identifier for the specific message or warning related to the smell. - module (str): The name of the module or component in which the smell is located. - obj (str): The specific object (e.g., function, class) associated with the smell. - path (str): The relative path to the source file from the project root. - symbol (str): The symbol or code construct (e.g., variable, method) involved in the smell. - type (str): The type or category of the smell (e.g., "complexity", "duplication"). - occurences (list[Occurence]): A list of individual occurences of a same smell, contains positional info. - additionalInfo (AddInfo): (Optional) Any custom information m for a type of smell + id: Optional unique identifier + confidence: Detection confidence level + message: Description of the smell + messageId: Unique message identifier + module: Module where smell was found + obj: Specific object containing the smell + path: File path relative to project root + symbol: Code symbol involved + type: Smell category/type + occurences: List of locations where smell appears + additionalInfo: Optional smell-specific metadata """ + id: Optional[str] = "" confidence: str message: str messageId: str @@ -34,17 +37,22 @@ class Smell(BaseModel): class CRCSmell(Smell): + """Represents Cache-Related Computation smells with required CRC metadata.""" + additionalInfo: CRCInfo # type: ignore class SCLSmell(Smell): + """Represents String Concatenation in Loops smells with required SCL metadata.""" + additionalInfo: SCLInfo # type: ignore -LECSmell = Smell -LLESmell = Smell -LMCSmell = Smell -LPLSmell = Smell -UVASmell = Smell -MIMSmell = Smell -UGESmell = Smell +# Type aliases for other smell categories +LECSmell = Smell # Long Element Chain +LLESmell = Smell # Long Lambda Expression +LMCSmell = Smell # Long Message Chain +LPLSmell = Smell # Long Parameter List +UVASmell = Smell # Unused Variable or Attribute +MIMSmell = Smell # Member Ignoring Method +UGESmell = Smell # Use A Generator (R1729) diff --git a/src/ecooptimizer/data_types/smell_record.py b/src/ecooptimizer/data_types/smell_record.py index 31736939..2d82a554 100644 --- a/src/ecooptimizer/data_types/smell_record.py +++ b/src/ecooptimizer/data_types/smell_record.py @@ -1,6 +1,6 @@ from typing import Any, Callable, TypedDict -from ..refactorers.base_refactorer import BaseRefactorer +from ecooptimizer.refactorers.base_refactorer import BaseRefactorer class SmellRecord(TypedDict): diff --git a/src/ecooptimizer/exceptions.py b/src/ecooptimizer/exceptions.py deleted file mode 100644 index 298a5327..00000000 --- a/src/ecooptimizer/exceptions.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -import stat - - -class RefactoringError(Exception): - """Exception raised for errors that occured during the refcatoring process. 
- - Attributes: - targetFile -- file being refactored - message -- explanation of the error - """ - - def __init__(self, targetFile: str, message: str) -> None: - self.targetFile = targetFile - super().__init__(message) - - -class EnergySavingsError(RefactoringError): - pass - - -def remove_readonly(func, path, _): # noqa: ANN001 - # "Clear the readonly bit and reattempt the removal" - os.chmod(path, stat.S_IWRITE) # noqa: PTH101 - func(path) diff --git a/src/ecooptimizer/measurements/base_energy_meter.py b/src/ecooptimizer/measurements/base_energy_meter.py index 425b1fc0..2524e102 100644 --- a/src/ecooptimizer/measurements/base_energy_meter.py +++ b/src/ecooptimizer/measurements/base_energy_meter.py @@ -1,21 +1,27 @@ +"""Abstract base class for energy measurement implementations.""" + from abc import ABC, abstractmethod from pathlib import Path class BaseEnergyMeter(ABC): - def __init__(self): - """ - Base class for energy meters to measure the emissions of a given file. + """Abstract base class for measuring code energy consumption. - :param file_path: Path to the file to measure energy consumption. - :param logger: Logger instance to handle log messages. - """ + Provides the interface for concrete energy measurement implementations. + """ + + def __init__(self): + """Initializes the energy meter with empty emissions.""" self.emissions = None @abstractmethod - def measure_energy(self, file_path: Path): - """ - Abstract method to measure the energy consumption of the specified file. - Must be implemented by subclasses. + def measure_energy(self, file_path: Path) -> None: + """Measures energy consumption of a code file. + + Args: + file_path: Path to the file to measure + + Note: + Must be implemented by concrete subclasses """ pass diff --git a/src/ecooptimizer/measurements/codecarbon_energy_meter.py b/src/ecooptimizer/measurements/codecarbon_energy_meter.py index 99c0aa83..eceddf55 100644 --- a/src/ecooptimizer/measurements/codecarbon_energy_meter.py +++ b/src/ecooptimizer/measurements/codecarbon_energy_meter.py @@ -1,30 +1,37 @@ +"""CodeCarbon-based implementation for measuring code energy consumption.""" + import logging +import math import os from pathlib import Path import sys import subprocess import pandas as pd from tempfile import TemporaryDirectory +from typing import Optional from codecarbon import EmissionsTracker -from .base_energy_meter import BaseEnergyMeter +from ecooptimizer.measurements.base_energy_meter import BaseEnergyMeter class CodeCarbonEnergyMeter(BaseEnergyMeter): - def __init__(self): - """ - Initializes the CodeCarbonEnergyMeter with a file path and logger. + """Measures code energy consumption using CodeCarbon's emissions tracker.""" - :param file_path: Path to the file to measure energy consumption. - :param logger: Logger instance for logging events. - """ + def __init__(self): + """Initializes the energy meter with empty emissions data.""" super().__init__() self.emissions_data = None - def measure_energy(self, file_path: Path): - """ - Measures the carbon emissions for the specified file by running it with CodeCarbon. - Logs each step and stores the emissions data if available. + def measure_energy(self, file_path: Path) -> None: + """Executes a file while tracking emissions using CodeCarbon. 
+ + Args: + file_path: Path to Python file to measure + + Note: + Creates temporary directory for emissions data + Handles subprocess execution errors + Stores emissions data if measurement succeeds """ logging.info(f"Starting CodeCarbon energy measurement on {file_path.name}") @@ -32,7 +39,6 @@ def measure_energy(self, file_path: Path): os.environ["TEMP"] = custom_temp_dir # For Windows os.environ["TMPDIR"] = custom_temp_dir # For Unix-based systems - # TODO: Save to logger so doesn't print to console tracker = EmissionsTracker( output_dir=custom_temp_dir, allow_multiple_runs=True, @@ -49,32 +55,37 @@ def measure_energy(self, file_path: Path): except subprocess.CalledProcessError as e: logging.error(f"Error executing file '{file_path}': {e}") finally: - self.emissions = tracker.stop() - emissions_file = custom_temp_dir / Path("emissions.csv") + emissions = tracker.stop() + # Only store float or None values + if ( + isinstance(emissions, float) and not math.isnan(emissions) + ) or emissions is None: + self.emissions = emissions + else: + logging.warning( + f"Unexpected emissions type {type(emissions)}. Setting to None." + ) + self.emissions = None + emissions_file = Path(custom_temp_dir) / "emissions.csv" if emissions_file.exists(): - self.emissions_data = self.extract_emissions_csv(emissions_file) + self.emissions_data = self._extract_emissions_data(emissions_file) else: - logging.error( - "Emissions file was not created due to an error during execution." - ) - self.emissions_data = None + logging.error("Emissions file missing - measurement failed") - def extract_emissions_csv(self, csv_file_path: Path): - """ - Extracts emissions data from a CSV file generated by CodeCarbon. + def _extract_emissions_data(self, csv_path: Path) -> Optional[dict]: + """Extracts emissions data from CodeCarbon output CSV. + + Args: + csv_path: Path to emissions CSV file - :param csv_file_path: Path to the CSV file. - :return: Dictionary containing the last row of emissions data or None if an error occurs. + Returns: + dict: Last measurement record from CSV + None: If extraction fails """ - str_csv_path = str(csv_file_path) - if csv_file_path.exists(): - try: - df = pd.read_csv(str_csv_path) - return df.to_dict(orient="records")[-1] - except Exception as e: - logging.info(f"Error reading file '{str_csv_path}': {e}") - return None - else: - logging.info(f"File '{str_csv_path}' does not exist.") + try: + df = pd.read_csv(csv_path) + return df.to_dict(orient="records")[-1] + except Exception as e: + logging.error(f"Failed to read emissions data: {e}") return None diff --git a/src/ecooptimizer/refactorers/base_refactorer.py b/src/ecooptimizer/refactorers/base_refactorer.py index e0d0c3b7..0ee32cd1 100644 --- a/src/ecooptimizer/refactorers/base_refactorer.py +++ b/src/ecooptimizer/refactorers/base_refactorer.py @@ -1,14 +1,23 @@ +"""Abstract base class for all code smell refactorers.""" + from abc import ABC, abstractmethod from pathlib import Path from typing import Generic, TypeVar -from ..data_types.smell import Smell +from ecooptimizer.data_types.smell import Smell T = TypeVar("T", bound=Smell) class BaseRefactorer(ABC, Generic[T]): + """Defines the interface for concrete refactoring implementations. 
+ + Type Parameters: + T: Type of smell this refactorer handles (must inherit from Smell) + """ + def __init__(self): + """Initializes the refactorer with empty modified files list.""" self.modified_files: list[Path] = [] @abstractmethod @@ -19,5 +28,17 @@ def refactor( smell: T, output_file: Path, overwrite: bool = True, - ): + ) -> None: + """Performs the refactoring operation on the target file. + + Args: + target_file: File containing the smell to refactor + source_dir: Root directory of the source files + smell: Detected smell instance with metadata + output_file: Destination path for refactored code + overwrite: Whether to overwrite existing output file + + Note: + Concrete subclasses must implement this method + """ pass diff --git a/src/ecooptimizer/refactorers/concrete/list_comp_any_all.py b/src/ecooptimizer/refactorers/concrete/list_comp_any_all.py index 7b590abb..8699b679 100644 --- a/src/ecooptimizer/refactorers/concrete/list_comp_any_all.py +++ b/src/ecooptimizer/refactorers/concrete/list_comp_any_all.py @@ -2,8 +2,8 @@ from pathlib import Path from libcst.metadata import PositionProvider -from ..base_refactorer import BaseRefactorer -from ...data_types.smell import UGESmell +from ecooptimizer.refactorers.base_refactorer import BaseRefactorer +from ecooptimizer.data_types.smell import UGESmell class ListCompInAnyAllTransformer(cst.CSTTransformer): diff --git a/src/ecooptimizer/refactorers/concrete/long_element_chain.py b/src/ecooptimizer/refactorers/concrete/long_element_chain.py index b38df65c..0d57050c 100644 --- a/src/ecooptimizer/refactorers/concrete/long_element_chain.py +++ b/src/ecooptimizer/refactorers/concrete/long_element_chain.py @@ -4,8 +4,8 @@ import re from typing import Any, Optional -from ..multi_file_refactorer import MultiFileRefactorer -from ...data_types.smell import LECSmell +from ecooptimizer.refactorers.multi_file_refactorer import MultiFileRefactorer +from ecooptimizer.data_types.smell import LECSmell class DictAccess: @@ -164,7 +164,7 @@ def find_dict_assignment_in_file(self, tree: ast.AST): """find the dictionary assignment from AST based on the dict name""" class DictVisitor(ast.NodeVisitor): - def visit_Assign(self_, node: ast.Assign): + def visit_Assign(self_, node: ast.Assign): # type: ignore if isinstance(node.value, ast.Dict) and len(node.targets) == 1: # dictionary is a varibale if ( @@ -192,7 +192,7 @@ def extract_dict_literal(self, node: ast.AST): return { self.extract_dict_literal(k) if isinstance(k, ast.AST) - else k: self.extract_dict_literal(v) if isinstance(v, ast.AST) else v + else k: self.extract_dict_literal(v) if isinstance(v, ast.AST) else v # type: ignore for k, v in zip(node.keys, node.values) } elif isinstance(node, ast.Constant): @@ -253,7 +253,7 @@ def _refactor_all_in_file(self, file_path: Path): refactored_lines = self._update_dict_assignment(refactored_lines) # Write changes back to file - file_path.write_text("\n".join(refactored_lines)) + file_path.write_text("\n".join(refactored_lines)) # type: ignore return True @@ -340,4 +340,4 @@ def _update_dict_assignment(self, refactored_lines: list[str]) -> None: refactored_lines = [line for line in refactored_lines if line.strip() != "Remove this line"] - return refactored_lines + return refactored_lines # type: ignore diff --git a/src/ecooptimizer/refactorers/concrete/long_lambda_function.py b/src/ecooptimizer/refactorers/concrete/long_lambda_function.py index 76c5e6bc..47dcc4c8 100644 --- a/src/ecooptimizer/refactorers/concrete/long_lambda_function.py +++ 
b/src/ecooptimizer/refactorers/concrete/long_lambda_function.py @@ -1,7 +1,7 @@ from pathlib import Path import re -from ..base_refactorer import BaseRefactorer -from ...data_types.smell import LLESmell +from ecooptimizer.refactorers.base_refactorer import BaseRefactorer +from ecooptimizer.data_types.smell import LLESmell class LongLambdaFunctionRefactorer(BaseRefactorer[LLESmell]): @@ -62,9 +62,7 @@ def refactor( # Find continuation lines only if needed if has_parentheses: - while current_line < len(lines) - 1 and not lambda_lines[ - -1 - ].strip().endswith(")"): + while current_line < len(lines) - 1 and not lambda_lines[-1].strip().endswith(")"): current_line += 1 lambda_lines.append(lines[current_line].rstrip()) else: @@ -82,9 +80,7 @@ def refactor( # Use different regex based on whether the lambda line starts with a parenthesis if has_parentheses: - lambda_match = re.search( - r"lambda\s+([\w, ]+):\s+(.+?)(?=\s*\))", full_lambda_line - ) + lambda_match = re.search(r"lambda\s+([\w, ]+):\s+(.+?)(?=\s*\))", full_lambda_line) else: lambda_match = re.search(r"lambda\s+([\w, ]+):\s+(.+)", full_lambda_line) diff --git a/src/ecooptimizer/refactorers/concrete/long_message_chain.py b/src/ecooptimizer/refactorers/concrete/long_message_chain.py index 5f7f9738..ed418e37 100644 --- a/src/ecooptimizer/refactorers/concrete/long_message_chain.py +++ b/src/ecooptimizer/refactorers/concrete/long_message_chain.py @@ -1,7 +1,7 @@ from pathlib import Path import re -from ..base_refactorer import BaseRefactorer -from ...data_types.smell import LMCSmell +from ecooptimizer.refactorers.base_refactorer import BaseRefactorer +from ecooptimizer.data_types.smell import LMCSmell class LongMessageChainRefactorer(BaseRefactorer[LMCSmell]): @@ -63,17 +63,17 @@ def refactor( if i < len(method_calls): refactored_lines.append( - f"{leading_whitespace}intermediate_{i} = " f"intermediate_{i-1}.{method}" + f"{leading_whitespace}intermediate_{i} = intermediate_{i - 1}.{method}" ) else: # Final assignment using original variable name if is_print: refactored_lines.append( - f"{leading_whitespace}print(intermediate_{i-1}.{method})" + f"{leading_whitespace}print(intermediate_{i - 1}.{method})" ) else: refactored_lines.append( - f"{leading_whitespace}{original_var} = " f"intermediate_{i-1}.{method}" + f"{leading_whitespace}{original_var} = intermediate_{i - 1}.{method}" ) lines[line_number - 1] = "\n".join(refactored_lines) + "\n" @@ -103,20 +103,19 @@ def refactor( if i < len(method_calls) - 1: refactored_lines.append( - f"{leading_whitespace}intermediate_{i} = " - f"intermediate_{i-1}.{method}" + f"{leading_whitespace}intermediate_{i} = intermediate_{i - 1}.{method}" ) else: # Preserve original assignment/print structure if original_has_print: refactored_lines.append( - f"{leading_whitespace}print(intermediate_{i-1}.{method})" + f"{leading_whitespace}print(intermediate_{i - 1}.{method})" ) else: original_assignment = line_with_chain.split("=", 1)[0].strip() refactored_lines.append( f"{leading_whitespace}{original_assignment} = " - f"intermediate_{i-1}.{method}" + f"intermediate_{i - 1}.{method}" ) lines[line_number - 1] = "\n".join(refactored_lines) + "\n" diff --git a/src/ecooptimizer/refactorers/concrete/long_parameter_list.py b/src/ecooptimizer/refactorers/concrete/long_parameter_list.py index 063ee3de..eda3849b 100644 --- a/src/ecooptimizer/refactorers/concrete/long_parameter_list.py +++ b/src/ecooptimizer/refactorers/concrete/long_parameter_list.py @@ -5,8 +5,8 @@ from typing import Optional from collections.abc import 
Mapping -from ..multi_file_refactorer import MultiFileRefactorer -from ...data_types.smell import LPLSmell +from ecooptimizer.refactorers.multi_file_refactorer import MultiFileRefactorer +from ecooptimizer.data_types.smell import LPLSmell class FunctionCallVisitor(cst.CSTVisitor): @@ -21,7 +21,7 @@ def __init__(self, function_name: str, class_name: str, is_constructor: bool): def visit_Call(self, node: cst.Call): """Check if the function/class constructor is called.""" # handle class constructor call - if self.is_constructor and m.matches(node.func, m.Name(self.class_name)): + if self.is_constructor and m.matches(node.func, m.Name(self.class_name)): # type: ignore self.found = True # handle standalone function calls @@ -137,7 +137,7 @@ def create_parameter_object_class( param_cst = cst.Param( name=cst.Name(param), - default=default_value, # set default value if available + default=default_value, # set default value if available # type: ignore ) constructor_params.append(param_cst) @@ -509,7 +509,7 @@ def leave_Call(self, original_node: cst.Call, updated_node: cst.Call) -> cst.Cal # Separate positional, keyword, and variadic arguments for i, arg in enumerate(updated_node.args): - if isinstance(arg, cst.Arg): + if isinstance(arg, cst.Arg): # type: ignore if arg.keyword is None: # If this is a positional argument beyond the number of parameters, # it's a variadic arg @@ -765,10 +765,10 @@ def refactor( self.function_node, param_names ) - if len(self.used_params) > max_param_limit: + if len(self.used_params) > max_param_limit: # type: ignore # classify used params into data and config types and store the results in a dictionary, if number of used params is beyond the configured limit self.classified_params = self.parameter_analyzer.classify_parameters( - self.used_params + self.used_params # type: ignore ) self.classified_param_names = self._generate_unique_param_class_names( target_line @@ -788,10 +788,10 @@ def refactor( tree = self.function_updater.update_function_calls( tree, self.function_node, - self.used_params, + self.used_params, # type: ignore self.classified_params, self.classified_param_names, - self.enclosing_class_name, + self.enclosing_class_name, # type: ignore ) # next updaate function signature and parameter usages within function body updated_function_node = self.function_updater.update_function_signature( @@ -804,12 +804,17 @@ def refactor( else: # just remove the unused params if the used parameters are within the max param list updated_function_node = self.function_updater.remove_unused_params( - self.function_node, self.used_params, default_value_params + self.function_node, + self.used_params, # type: ignore + default_value_params, # type: ignore ) # update all calls to match the new signature tree = self.function_updater.update_function_calls_unclassified( - tree, self.function_node, self.used_params, self.enclosing_class_name + tree, + self.function_node, + self.used_params, # type: ignore + self.enclosing_class_name, # type: ignore ) class FunctionReplacer(cst.CSTTransformer): @@ -827,7 +832,7 @@ def leave_FunctionDef( return self.updated_function # replace with the modified function return updated_node # leave other functions unchanged - tree = tree.visit(FunctionReplacer(self.function_node, updated_function_node)) + tree = tree.visit(FunctionReplacer(self.function_node, updated_function_node)) # type: ignore # Write the modified source modified_source = tree.code @@ -846,7 +851,7 @@ def _generate_unique_param_class_names(self, target_line: int) -> tuple[str, str 
Generate unique class names for data params and config params based on function name and line number. :return: A tuple containing (DataParams class name, ConfigParams class name). """ - unique_suffix = f"{self.function_node.name.value}_{target_line}" + unique_suffix = f"{self.function_node.name.value}_{target_line}" # type: ignore data_class_name = f"DataParams_{unique_suffix}" config_class_name = f"ConfigParams_{unique_suffix}" return data_class_name, config_class_name @@ -858,7 +863,9 @@ def _process_file(self, file: Path): tree = cst.parse_module(file.read_text()) visitor = FunctionCallVisitor( - self.function_node.name.value, self.enclosing_class_name, self.is_constructor + self.function_node.name.value, # type: ignore + self.enclosing_class_name, # type: ignore + self.is_constructor, # type: ignore ) tree.visit(visitor) @@ -871,11 +878,11 @@ def _process_file(self, file: Path): # update function calls/class instantiations tree = self.function_updater.update_function_calls( tree, - self.function_node, - self.used_params, - self.classified_params, - self.classified_param_names, - self.enclosing_class_name, + self.function_node, # type: ignore + self.used_params, # type: ignore + self.classified_params, # type: ignore + self.classified_param_names, # type: ignore + self.enclosing_class_name, # type: ignore ) modified_source = tree.code diff --git a/src/ecooptimizer/refactorers/concrete/member_ignoring_method.py b/src/ecooptimizer/refactorers/concrete/member_ignoring_method.py index 0d1fda6c..d7f43dc3 100644 --- a/src/ecooptimizer/refactorers/concrete/member_ignoring_method.py +++ b/src/ecooptimizer/refactorers/concrete/member_ignoring_method.py @@ -5,10 +5,10 @@ from pathlib import Path -from ...config import CONFIG +from ecooptimizer.config import CONFIG -from ..multi_file_refactorer import MultiFileRefactorer -from ...data_types.smell import MIMSmell +from ecooptimizer.refactorers.multi_file_refactorer import MultiFileRefactorer +from ecooptimizer.data_types.smell import MIMSmell logger = CONFIG["refactorLogger"] diff --git a/src/ecooptimizer/refactorers/concrete/repeated_calls.py b/src/ecooptimizer/refactorers/concrete/repeated_calls.py index d45db02d..86d6b6d2 100644 --- a/src/ecooptimizer/refactorers/concrete/repeated_calls.py +++ b/src/ecooptimizer/refactorers/concrete/repeated_calls.py @@ -2,8 +2,8 @@ import re from pathlib import Path -from ...data_types.smell import CRCSmell -from ..base_refactorer import BaseRefactorer +from ecooptimizer.data_types.smell import CRCSmell +from ecooptimizer.refactorers.base_refactorer import BaseRefactorer def extract_function_name(call_string: str): @@ -54,17 +54,20 @@ def refactor( if not parent_node: return - # Determine the insertion point for the cached variable - insert_line = self._find_insert_line(parent_node) - indent = self._get_indentation(lines, insert_line) + # Find the first occurrence line + first_occurrence = min(occ.line for occ in self.smell.occurences) + + # Get the indentation of the first occurrence + indent = self._get_indentation(lines, first_occurrence) cached_assignment = f"{indent}{self.cached_var_name} = {self.call_string}\n" - # Insert the cached variable into the source lines - lines.insert(insert_line - 1, cached_assignment) + # Insert the cached variable at the first occurrence line + lines.insert(first_occurrence - 1, cached_assignment) line_shift = 1 # Track the shift in line numbers caused by the insertion # Replace calls with the cached variable in the affected lines for occurrence in self.smell.occurences: + # 
Adjust line number considering the insertion adjusted_line_index = occurrence.line - 1 + line_shift original_line = lines[adjusted_line_index] updated_line = self._replace_call_in_line( @@ -103,57 +106,6 @@ def _find_valid_parent(self, tree: ast.Module): candidate_parent = node return candidate_parent - def _find_insert_line(self, parent_node: ast.FunctionDef | ast.ClassDef | ast.Module): - """ - Find the line to insert the cached variable assignment. - - - If it's a function, insert at the beginning but **after a docstring** if present. - - If it's a method call (`obj.method()`), insert after `obj` is defined. - - If it's a lambda assignment (`compute_demo = lambda ...`), insert after it. - """ - if isinstance(parent_node, ast.Module): - return 1 # Top of the module - - # Extract variable or function name from call string - var_match = re.match(r"(\w+)\.", self.call_string) # Matches `obj.method()` - if var_match: - obj_name = var_match.group(1) # Extract `obj` - - # Find the first assignment of `obj` - for node in parent_node.body: - if isinstance(node, ast.Assign): - if any( - isinstance(target, ast.Name) and target.id == obj_name - for target in node.targets - ): - return node.lineno + 1 # Insert after the assignment of `obj` - - # Find the first lambda assignment - for node in parent_node.body: - if isinstance(node, ast.Assign) and isinstance(node.value, ast.Lambda): - lambda_var_name = node.targets[0].id # Extract variable name - if lambda_var_name in self.call_string: - return node.lineno + 1 # Insert after the lambda function - - # Check if the first statement is a docstring - if ( - isinstance(parent_node.body[0], ast.Expr) - and isinstance(parent_node.body[0].value, ast.Constant) - and isinstance(parent_node.body[0].value.value, str) # Ensures it's a string docstring - ): - docstring_start = parent_node.body[0].lineno - docstring_end = docstring_start - - # Find the last line of the docstring by counting the lines it spans - docstring_content = parent_node.body[0].value.value - docstring_lines = docstring_content.count("\n") - if docstring_lines > 0: - docstring_end += docstring_lines - - return docstring_end + 1 # Insert after the last line of the docstring - - return parent_node.body[0].lineno # Default: insert at function start - def _line_in_node_body(self, node: ast.FunctionDef | ast.ClassDef | ast.Module, line: int): """ Check if a line is within the body of a given AST node. 
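The hunk above replaces the old `_find_insert_line` heuristic (with its docstring, lambda, and `obj.method()` special cases) with a simpler rule: the cached assignment is inserted at the line of the smell's first occurrence, and the `line_shift` bookkeeping accounts for that one inserted line when the later occurrences are rewritten. A minimal before/after sketch of the intended transformation (the `expensive_lookup` function and the `cached_result` name are hypothetical; the real refactorer derives its variable name from the smell's `callString`):

```python
def expensive_lookup(data):
    """Stand-in for a costly call; hypothetical, for illustration only."""
    return sum(data)


# Before: CRC001 would flag two occurrences of the same call inside report().
def report_before(data):
    print(expensive_lookup(data))       # first occurrence
    total = expensive_lookup(data) + 1  # second occurrence
    return total


# After: the cached assignment lands on the first occurrence's line, and each
# flagged line is rewritten to use the cached variable.
def report_after(data):
    cached_result = expensive_lookup(data)
    print(cached_result)
    total = cached_result + 1
    return total


print(report_before([1, 2, 3]) == report_after([1, 2, 3]))  # True
```

Anchoring the assignment at the first occurrence keeps the cached value adjacent to its first use, which is why the removed docstring-scanning and assignment-scanning logic is no longer needed.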
diff --git a/src/ecooptimizer/refactorers/concrete/str_concat_in_loop.py b/src/ecooptimizer/refactorers/concrete/str_concat_in_loop.py index e4575844..5c60eefc 100644 --- a/src/ecooptimizer/refactorers/concrete/str_concat_in_loop.py +++ b/src/ecooptimizer/refactorers/concrete/str_concat_in_loop.py @@ -4,8 +4,8 @@ import astroid from astroid import nodes -from ..base_refactorer import BaseRefactorer -from ...data_types.smell import SCLSmell +from ecooptimizer.refactorers.base_refactorer import BaseRefactorer +from ecooptimizer.data_types.smell import SCLSmell class UseListAccumulationRefactorer(BaseRefactorer[SCLSmell]): diff --git a/src/ecooptimizer/refactorers/multi_file_refactorer.py b/src/ecooptimizer/refactorers/multi_file_refactorer.py index 77d8dc4f..52b87698 100644 --- a/src/ecooptimizer/refactorers/multi_file_refactorer.py +++ b/src/ecooptimizer/refactorers/multi_file_refactorer.py @@ -1,18 +1,18 @@ +"""Base class for refactorers that operate across multiple files.""" + # pyright: reportOptionalMemberAccess=false from abc import abstractmethod import fnmatch from pathlib import Path from typing import TypeVar -from ..config import CONFIG - -from .base_refactorer import BaseRefactorer - -from ..data_types.smell import Smell - +from ecooptimizer.config import CONFIG +from ecooptimizer.refactorers.base_refactorer import BaseRefactorer +from ecooptimizer.data_types.smell import Smell T = TypeVar("T", bound=Smell) +# Default patterns for files/directories to ignore during refactoring DEFAULT_IGNORED_PATTERNS = { "__pycache__", "build", @@ -23,18 +23,29 @@ ".*", } +# Default location for ignore pattern configuration files DEFAULT_IGNORE_PATH = Path(__file__).parent / "patterns_to_ignore" class MultiFileRefactorer(BaseRefactorer[T]): + """Abstract base class for refactorers that need to process multiple files.""" + def __init__(self): + """Initializes the refactorer with default ignore patterns.""" super().__init__() self.target_file: Path = None # type: ignore self.ignore_patterns = self._load_ignore_patterns() self.py_files: list[Path] = [] def _load_ignore_patterns(self, ignore_dir: Path = DEFAULT_IGNORE_PATH) -> set[str]: - """Load ignore patterns from a file, similar to .gitignore.""" + """Loads ignore patterns from configuration files. + + Args: + ignore_dir: Directory containing ignore pattern files + + Returns: + Combined set of default and custom ignore patterns + """ if not ignore_dir.is_dir(): return DEFAULT_IGNORED_PATTERNS @@ -48,33 +59,56 @@ def _load_ignore_patterns(self, ignore_dir: Path = DEFAULT_IGNORE_PATH) -> set[s return patterns def is_ignored(self, item: Path) -> bool: - """Check if a file or directory matches any ignore pattern.""" + """Checks if a path should be ignored during refactoring. + + Args: + item: File or directory path to check + + Returns: + True if the path matches any ignore pattern, False otherwise + """ return any(fnmatch.fnmatch(item.name, pattern) for pattern in self.ignore_patterns) - def traverse(self, directory: Path): + def traverse(self, directory: Path) -> None: + """Recursively scans a directory for Python files, skipping ignored paths. 
+ + Args: + directory: Root directory to scan + """ for item in directory.iterdir(): if item.is_dir(): - CONFIG["refactorLogger"].debug(f"Scanning directory: {item!s}, name: {item.name}") + CONFIG["refactorLogger"].debug(f"Scanning directory: {item!s}") if self.is_ignored(item): CONFIG["refactorLogger"].debug(f"Ignored directory: {item!s}") continue - CONFIG["refactorLogger"].debug(f"Entering directory: {item!s}") self.traverse(item) elif item.is_file() and item.suffix == ".py": self.py_files.append(item) - def traverse_and_process(self, directory: Path): + def traverse_and_process(self, directory: Path) -> None: + """Processes all Python files in a directory. + + Args: + directory: Root directory containing files to process + """ if not self.py_files: self.traverse(directory) for file in self.py_files: - CONFIG["refactorLogger"].debug(f"Checking file: {file!s}") + CONFIG["refactorLogger"].debug(f"Processing file: {file!s}") if self._process_file(file): if file not in self.modified_files and not file.samefile(self.target_file): self.modified_files.append(file.resolve()) - CONFIG["refactorLogger"].debug("finished processing file") + CONFIG["refactorLogger"].debug("Finished processing file") @abstractmethod def _process_file(self, file: Path) -> bool: - """Abstract method to be implemented by subclasses to handle file processing.""" + """Processes an individual file (implemented by concrete refactorers). + + Args: + file: Python file to process + + Returns: + True if the file was modified, False otherwise + """ pass diff --git a/src/ecooptimizer/refactorers/refactorer_controller.py b/src/ecooptimizer/refactorers/refactorer_controller.py index 214dd29d..9a5b6a11 100644 --- a/src/ecooptimizer/refactorers/refactorer_controller.py +++ b/src/ecooptimizer/refactorers/refactorer_controller.py @@ -1,33 +1,36 @@ +"""Controller for executing code smell refactoring operations.""" + # pyright: reportOptionalMemberAccess=false from pathlib import Path -from ..config import CONFIG - -from ..data_types.smell import Smell -from ..utils.smells_registry import get_refactorer +from ecooptimizer.config import CONFIG +from ecooptimizer.data_types.smell import Smell +from ecooptimizer.utils.smells_registry import get_refactorer class RefactorerController: + """Orchestrates refactoring operations for detected code smells.""" + def __init__(self): - """Manages the execution of refactorers for detected code smells.""" + """Initializes the controller with empty smell counters.""" self.smell_counters = {} def run_refactorer( self, target_file: Path, source_dir: Path, smell: Smell, overwrite: bool = True - ): - """Executes the appropriate refactorer for the given smell. + ) -> list[Path]: + """Executes the appropriate refactorer for a detected smell. Args: - target_file (Path): The file to be refactored. - source_dir (Path): The source directory containing the file. - smell (Smell): The detected smell to be refactored. - overwrite (bool, optional): Whether to overwrite existing files. Defaults to True. + target_file: File containing the smell to refactor + source_dir: Root directory of the source files + smell: Detected smell instance with metadata + overwrite: Whether to overwrite existing files Returns: - list[Path]: A list of modified files resulting from the refactoring process. + List of paths to all modified files Raises: - NotImplementedError: If no refactorer exists for the given smell. 
+ NotImplementedError: If no refactorer exists for this smell type """ smell_id = smell.messageId smell_symbol = smell.symbol @@ -35,20 +38,32 @@ def run_refactorer( modified_files = [] if refactorer_class: - self.smell_counters[smell_id] = self.smell_counters.get(smell_id, 0) + 1 - file_count = self.smell_counters[smell_id] - - output_file_name = f"{target_file.stem}_path_{smell_id}_{file_count}.py" - output_file_path = Path(__file__).parent / "../../../outputs" / output_file_name + self._track_smell_occurrence(smell_id) + output_path = self._generate_output_path(target_file, smell_id) CONFIG["refactorLogger"].info( - f"🔄 Running refactoring for {smell_symbol} using {refactorer_class.__name__}" + f"🔄 Running {refactorer_class.__name__} for {smell_symbol}" ) + refactorer = refactorer_class() - refactorer.refactor(target_file, source_dir, smell, output_file_path, overwrite) + refactorer.refactor(target_file, source_dir, smell, output_path, overwrite) modified_files = refactorer.modified_files else: - CONFIG["refactorLogger"].error(f"❌ No refactorer found for smell: {smell_symbol}") - raise NotImplementedError(f"No refactorer implemented for smell: {smell_symbol}") + self._handle_missing_refactorer(smell_symbol) return modified_files + + def _track_smell_occurrence(self, smell_id: str) -> None: + """Increments counter for a specific smell type.""" + self.smell_counters[smell_id] = self.smell_counters.get(smell_id, 0) + 1 + + def _generate_output_path(self, target_file: Path, smell_id: str) -> Path: + """Generates output path for refactored file.""" + file_count = self.smell_counters[smell_id] + output_name = f"{target_file.stem}_path_{smell_id}_{file_count}.py" + return Path(__file__).parent / "../../../outputs" / output_name + + def _handle_missing_refactorer(self, smell_symbol: str) -> None: + """Logs error and raises exception for unimplemented refactorers.""" + CONFIG["refactorLogger"].error(f"❌ No refactorer for smell: {smell_symbol}") + raise NotImplementedError(f"No refactorer for smell: {smell_symbol}") diff --git a/src/ecooptimizer/utils/output_manager.py b/src/ecooptimizer/utils/output_manager.py index 8c2c1db1..73a110fb 100644 --- a/src/ecooptimizer/utils/output_manager.py +++ b/src/ecooptimizer/utils/output_manager.py @@ -1,3 +1,5 @@ +"""Logging and file output management utilities.""" + from enum import Enum import json import logging @@ -10,15 +12,32 @@ class EnumEncoder(json.JSONEncoder): + """Custom JSON encoder that handles Enum serialization.""" + def default(self, o): # noqa: ANN001 + """Converts Enum objects to their values for JSON serialization. + + Args: + o: Object to serialize + + Returns: + Serialized value for Enums, default JSON serialization for other types + """ if isinstance(o, Enum): - return o.value # Serialize using the Enum's value + return o.value return super().default(o) class LoggingManager: + """Manages log file setup and configuration for different application components.""" + def __init__(self, logs_dir: Path = DEV_OUTPUT / "logs", production: bool = False): - """Initializes log paths based on mode.""" + """Initializes logging directory structure and configures loggers. 
+ + Args: + logs_dir: Directory to store log files + production: Whether to run in production mode + """ self.production = production self.logs_dir = logs_dir @@ -30,35 +49,33 @@ def __init__(self, logs_dir: Path = DEV_OUTPUT / "logs", production: bool = Fals } self._setup_loggers() - def _initialize_output_structure(self): - """Ensures required directories exist and clears old logs.""" + def _initialize_output_structure(self) -> None: + """Creates required directories and clears old logs if not in production.""" if not self.production: DEV_OUTPUT.mkdir(exist_ok=True) self.logs_dir.mkdir(exist_ok=True) - def _clear_logs(self): - """Removes existing log files while preserving the log directory.""" + def _clear_logs(self) -> None: + """Removes existing log files while preserving the log directory structure.""" if self.logs_dir.exists(): for log_file in self.logs_dir.iterdir(): if log_file.is_file(): log_file.unlink() logging.info("🗑️ Cleared existing log files.") - def _setup_loggers(self): - """Configures loggers for different EcoOptimizer processes.""" + def _setup_loggers(self) -> None: + """Configures root logger and component-specific loggers.""" logging.root.handlers.clear() - self._configure_root_logger() self.loggers = { "detect": self._create_child_logger("detect", self.log_files["detect"]), "refactor": self._create_child_logger("refactor", self.log_files["refactor"]), } - logging.info("📝 Loggers initialized successfully.") - def _configure_root_logger(self): - """Configures the root logger to capture all logs.""" + def _configure_root_logger(self) -> None: + """Sets up the root logger with file handler and formatting.""" root_logger = logging.getLogger() root_logger.setLevel(logging.DEBUG) @@ -71,25 +88,25 @@ def _configure_root_logger(self): root_logger.addHandler(main_handler) def _create_child_logger(self, name: str, log_file: Path) -> logging.Logger: - """ - Creates a child logger that logs to its own file and propagates to the root logger. + """Creates and configures a component-specific logger. Args: - name (str): Name of the logger. - log_file (Path): Path to the specific log file. + name: Logger name + log_file: Path to log file Returns: - logging.Logger: Configured logger instance. + Configured logger instance """ logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) logger.propagate = True file_handler = logging.FileHandler(str(log_file), mode="a", encoding="utf-8") - formatter = logging.Formatter( - "%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s", "%Y-%m-%d %H:%M:%S" + file_handler.setFormatter( + logging.Formatter( + "%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s", "%Y-%m-%d %H:%M:%S" + ) ) - file_handler.setFormatter(formatter) file_handler.setLevel(logging.DEBUG) logger.addHandler(file_handler) @@ -97,8 +114,15 @@ def _create_child_logger(self, name: str, log_file: Path) -> logging.Logger: return logger -def save_file(file_name: str, data: str, mode: str, message: str = ""): - """Saves data to a file in the output directory.""" +def save_file(file_name: str, data: str, mode: str, message: str = "") -> None: + """Saves text data to a file in the output directory. 
+ + Args: + file_name: Target filename + data: Content to write + mode: File open mode + message: Optional custom success message + """ file_path = DEV_OUTPUT / file_name with file_path.open(mode) as file: file.write(data) @@ -106,15 +130,28 @@ def save_file(file_name: str, data: str, mode: str, message: str = ""): logging.info(log_message) -def save_json_files(file_name: str, data: dict[Any, Any] | list[Any]): - """Saves data to a JSON file in the output directory.""" +def save_json_files(file_name: str, data: dict[Any, Any] | list[Any]) -> None: + """Saves data as JSON file in the output directory. + + Args: + file_name: Target filename + data: Serializable data to write + """ file_path = DEV_OUTPUT / file_name file_path.write_text(json.dumps(data, cls=EnumEncoder, sort_keys=True, indent=4)) logging.info(f"📝 {file_name} saved to {file_path!s} as JSON file") -def copy_file_to_output(source_file_path: Path, new_file_name: str): - """Copies a file to the output directory with a new name.""" +def copy_file_to_output(source_file_path: Path, new_file_name: str) -> Path: + """Copies a file to the output directory with a new name. + + Args: + source_file_path: Source file to copy + new_file_name: Destination filename + + Returns: + Path to the copied file + """ destination_path = DEV_OUTPUT / new_file_name shutil.copy(source_file_path, destination_path) logging.info(f"📝 {new_file_name} copied to {destination_path!s}") diff --git a/src/ecooptimizer/utils/smell_enums.py b/src/ecooptimizer/utils/smell_enums.py index 3661002e..22a25dcf 100644 --- a/src/ecooptimizer/utils/smell_enums.py +++ b/src/ecooptimizer/utils/smell_enums.py @@ -1,29 +1,45 @@ +"""Enums for code smell classification and identification.""" + from enum import Enum class ExtendedEnum(Enum): + """Base enum class with additional utility methods.""" + @classmethod def list(cls) -> list[str]: + """Returns all enum values as a list. + + Returns: + List of all enum values as strings + """ return [c.value for c in cls] def __eq__(self, value: object) -> bool: + """Compares enum value with string representation. 
+ + Args: + value: Value to compare against + + Returns: + True if values match, False otherwise + """ return str(self.value) == value -# Enum class for standard Pylint code smells class PylintSmell(ExtendedEnum): - LONG_PARAMETER_LIST = "R0913" # Pylint code smell for functions with too many parameters - NO_SELF_USE = "R6301" # Pylint code smell for class methods that don't use any self calls - USE_A_GENERATOR = ( - "R1729" # Pylint code smell for unnecessary list comprehensions inside `any()` or `all()` - ) + """Standard code smells detected by Pylint.""" + + LONG_PARAMETER_LIST = "R0913" # Too many function parameters + NO_SELF_USE = "R6301" # Class methods not using self + USE_A_GENERATOR = "R1729" # Unnecessary list comprehensions in any()/all() -# Enum class for custom code smells not detected by Pylint class CustomSmell(ExtendedEnum): - LONG_MESSAGE_CHAIN = "LMC001" # Ast code smell for long message chains - UNUSED_VAR_OR_ATTRIBUTE = "UVA001" # Ast code smell for unused variable or attribute - LONG_ELEMENT_CHAIN = "LEC001" # Ast code smell for long element chains - LONG_LAMBDA_EXPR = "LLE001" # Ast code smell for long lambda expressions - STR_CONCAT_IN_LOOP = "SCL001" # Astroid code smell for string concatenation inside loops - CACHE_REPEATED_CALLS = "CRC001" # Ast code smell for repeated calls + """Custom code smells not detected by standard Pylint.""" + + LONG_MESSAGE_CHAIN = "LMC001" # Excessive method chaining + LONG_ELEMENT_CHAIN = "LEC001" # Excessive dictionary/object chaining + LONG_LAMBDA_EXPR = "LLE001" # Overly complex lambda expressions + STR_CONCAT_IN_LOOP = "SCL001" # Inefficient string concatenation in loops + CACHE_REPEATED_CALLS = "CRC001" # Repeated expensive function calls diff --git a/src/ecooptimizer/utils/smells_registry.py b/src/ecooptimizer/utils/smells_registry.py index 0de8fe82..b524c6ee 100644 --- a/src/ecooptimizer/utils/smells_registry.py +++ b/src/ecooptimizer/utils/smells_registry.py @@ -1,24 +1,29 @@ -from copy import deepcopy -from .smell_enums import CustomSmell, PylintSmell - -from ..analyzers.ast_analyzers.detect_long_element_chain import detect_long_element_chain -from ..analyzers.ast_analyzers.detect_long_lambda_expression import detect_long_lambda_expression -from ..analyzers.ast_analyzers.detect_long_message_chain import detect_long_message_chain -from ..analyzers.astroid_analyzers.detect_string_concat_in_loop import detect_string_concat_in_loop -from ..analyzers.ast_analyzers.detect_repeated_calls import detect_repeated_calls +"""Registry of code smells with their detection and refactoring configurations.""" -from ..refactorers.concrete.list_comp_any_all import UseAGeneratorRefactorer - -from ..refactorers.concrete.long_lambda_function import LongLambdaFunctionRefactorer -from ..refactorers.concrete.long_element_chain import LongElementChainRefactorer -from ..refactorers.concrete.long_message_chain import LongMessageChainRefactorer -from ..refactorers.concrete.member_ignoring_method import MakeStaticRefactorer -from ..refactorers.concrete.long_parameter_list import LongParameterListRefactorer -from ..refactorers.concrete.str_concat_in_loop import UseListAccumulationRefactorer -from ..refactorers.concrete.repeated_calls import CacheRepeatedCallsRefactorer +from copy import deepcopy +from typing import Any -from ..data_types.smell_record import SmellRecord +from ecooptimizer.utils.smell_enums import CustomSmell, PylintSmell +from ecooptimizer.analyzers.ast_analyzers.detect_long_element_chain import detect_long_element_chain +from 
ecooptimizer.analyzers.ast_analyzers.detect_long_lambda_expression import ( + detect_long_lambda_expression, +) +from ecooptimizer.analyzers.ast_analyzers.detect_long_message_chain import detect_long_message_chain +from ecooptimizer.analyzers.astroid_analyzers.detect_string_concat_in_loop import ( + detect_string_concat_in_loop, +) +from ecooptimizer.analyzers.ast_analyzers.detect_repeated_calls import detect_repeated_calls +from ecooptimizer.refactorers.concrete.list_comp_any_all import UseAGeneratorRefactorer +from ecooptimizer.refactorers.concrete.long_lambda_function import LongLambdaFunctionRefactorer +from ecooptimizer.refactorers.concrete.long_element_chain import LongElementChainRefactorer +from ecooptimizer.refactorers.concrete.long_message_chain import LongMessageChainRefactorer +from ecooptimizer.refactorers.concrete.member_ignoring_method import MakeStaticRefactorer +from ecooptimizer.refactorers.concrete.long_parameter_list import LongParameterListRefactorer +from ecooptimizer.refactorers.concrete.str_concat_in_loop import UseListAccumulationRefactorer +from ecooptimizer.refactorers.concrete.repeated_calls import CacheRepeatedCallsRefactorer +from ecooptimizer.data_types.smell_record import SmellRecord +# Base registry of all supported code smells _SMELL_REGISTRY: dict[str, SmellRecord] = { "use-a-generator": { "id": PylintSmell.USE_A_GENERATOR.value, @@ -88,13 +93,73 @@ }, } +# Default configuration values for smell detection +OPTIONS_CONFIG = { + "too-many-arguments": {"max_args": 6}, + "long-lambda-expression": {"threshold_length": 100, "threshold_count": 5}, + "long-message-chain": {"threshold": 3}, + "long-element-chain": {"threshold": 3}, + "cached-repeated-calls": {"threshold": 2}, +} + + +def retrieve_smell_registry(enabled_smells: dict[str, dict[str, int | str]] | list[str]): + """Returns a modified smell registry based on user preferences. 
+ + Args: + enabled_smells: Either a list of enabled smell names or a dictionary + with smell-specific configurations + + Returns: + Dictionary containing only enabled smells with updated configurations + """ + updated_registry = deepcopy(_SMELL_REGISTRY) + + if isinstance(enabled_smells, list): + return { + smell_name: config + for smell_name, config in updated_registry.items() + if smell_name in enabled_smells + } + + # Handle dictionary configuration + for smell_name, smell_config in updated_registry.items(): + if smell_name in enabled_smells: + smell_config["enabled"] = True + user_options = enabled_smells[smell_name] + if not user_options: + continue + + analyzer_method = smell_config["analyzer_method"] + original_options = smell_config["analyzer_options"] + + if analyzer_method == "pylint": + updated_options = {} + for opt_key, opt_data in original_options.items(): + if opt_key in user_options: + updated_options[opt_key] = { + "flag": opt_data["flag"], + "value": user_options[opt_key], + } + else: + updated_options[opt_key] = opt_data + smell_config["analyzer_options"] = updated_options + else: + # Merge user options with defaults for non-Pylint smells + smell_config["analyzer_options"] = {**original_options, **user_options} + else: + smell_config["enabled"] = False + + return updated_registry + -def retrieve_smell_registry(enabled_smells: list[str] | str): - """Returns a modified SMELL_REGISTRY based on user preferences (enables/disables smells).""" - if enabled_smells == "ALL": - return deepcopy(_SMELL_REGISTRY) - return {key: val for (key, val) in _SMELL_REGISTRY.items() if key in enabled_smells} +def get_refactorer(symbol: str) -> Any: # noqa: ANN401 + """Retrieves the refactorer class for a given smell symbol. + Args: + symbol: The smell identifier (e.g., "long-lambda-expression") -def get_refactorer(symbol: str): - return _SMELL_REGISTRY[symbol].get("refactorer", None) + Returns: + The refactorer class associated with the smell, or None if not found + """ + return _SMELL_REGISTRY.get(symbol, {}).get("refactorer") diff --git a/tests/_input_copies/test_2_copy.py b/tests/_input_copies/test_2_copy.py deleted file mode 100644 index 4d1f853d..00000000 --- a/tests/_input_copies/test_2_copy.py +++ /dev/null @@ -1,105 +0,0 @@ -import datetime # unused import - - -class Temp: - - def __init__(self) -> None: - self.unused_class_attribute = True - self.a = 3 - - def temp_function(self): - unused_var = 3 - b = 4 - return self.a + b - - -# LC: Large Class with too many responsibilities -class DataProcessor: - def __init__(self, data): - self.data = data - self.processed_data = [] - - # LM: Long Method - this method does way too much - def process_all_data(self): - results = [] - for item in self.data: - try: - # LPL: Long Parameter List - result = self.complex_calculation( - item, True, False, "multiply", 10, 20, None, "end" - ) - results.append(result) - except ( - Exception - ) as e: # UEH: Unqualified Exception Handling, catching generic exceptions - print("An error occurred:", e) - - # LMC: Long Message Chain - print(self.data[0].upper().strip().replace(" ", "_").lower()) - - # LLF: Long Lambda Function - self.processed_data = list( - filter(lambda x: x != None and x != 0 and len(str(x)) > 1, results) - ) - - return self.processed_data - - # LBCL: Long Base Class List - - -class AdvancedProcessor(DataProcessor): - pass - - # LTCE: Long Ternary Conditional Expression - def check_data(self, item): - return ( - True if item > 10 else False if item < -10 else None if item == 0 else item - ) 
- - # Complex List Comprehension - def complex_comprehension(self): - # CLC: Complex List Comprehension - self.processed_data = [ - x**2 if x % 2 == 0 else x**3 - for x in range(1, 100) - if x % 5 == 0 and x != 50 and x > 3 - ] - - # Long Element Chain - def long_chain(self): - # LEC: Long Element Chain accessing deeply nested elements - try: - deep_value = self.data[0][1]["details"]["info"]["more_info"][2]["target"] - return deep_value - except KeyError: - return None - - # Long Scope Chaining (LSC) - def long_scope_chaining(self): - for a in range(10): - for b in range(10): - for c in range(10): - for d in range(10): - for e in range(10): - if a + b + c + d + e > 25: - return "Done" - - # LPL: Long Parameter List - def complex_calculation( - self, item, flag1, flag2, operation, threshold, max_value, option, final_stage - ): - if operation == "multiply": - result = item * threshold - elif operation == "add": - result = item + max_value - else: - result = item - return result - - -# Main method to execute the code -if __name__ == "__main__": - sample_data = [1, 2, 3, 4, 5] - processor = DataProcessor(sample_data) - processed = processor.process_all_data() - print("Processed Data:", processed) diff --git a/tests/api/test_detect_route.py b/tests/api/test_detect_route.py index 150f94b9..6f3ead15 100644 --- a/tests/api/test_detect_route.py +++ b/tests/api/test_detect_route.py @@ -1,8 +1,8 @@ -from pathlib import Path from fastapi.testclient import TestClient from unittest.mock import patch from ecooptimizer.api.app import app +from ecooptimizer.api.error_handler import AppError from ecooptimizer.data_types import Smell from ecooptimizer.data_types.custom_fields import Occurence @@ -33,7 +33,10 @@ def get_mock_smell(): def test_detect_smells_success(): request_data = { "file_path": "fake_path.py", - "enabled_smells": ["smell1", "smell2"], + "enabled_smells": { + "smell1": {"threshold": 3}, + "smell2": {"threshold": 4}, + }, } with patch("pathlib.Path.exists", return_value=True): @@ -51,31 +54,34 @@ def test_detect_smells_success(): def test_detect_smells_file_not_found(): request_data = { "file_path": "path/to/nonexistent/file.py", - "enabled_smells": ["smell1", "smell2"], + "enabled_smells": { + "smell1": {"threshold": 3}, + "smell2": {"threshold": 4}, + }, } response = client.post("/smells", json=request_data) assert response.status_code == 404 - assert ( - response.json()["detail"] - == f"File not found: {Path('path','to','nonexistent','file.py')!s}" - ) + assert "File not found" in response.json()["detail"] def test_detect_smells_internal_server_error(): request_data = { "file_path": "fake_path.py", - "enabled_smells": ["smell1", "smell2"], + "enabled_smells": { + "smell1": {"threshold": 3}, + "smell2": {"threshold": 4}, + }, } with patch("pathlib.Path.exists", return_value=True): with patch( "ecooptimizer.analyzers.analyzer_controller.AnalyzerController.run_analysis" ) as mock_run_analysis: - mock_run_analysis.side_effect = Exception("Internal error") + mock_run_analysis.side_effect = AppError("Internal error") response = client.post("/smells", json=request_data) assert response.status_code == 500 - assert response.json()["detail"] == "Internal server error" + assert response.json()["detail"] == "Internal error" diff --git a/tests/api/test_refactor_route.py b/tests/api/test_refactor_route.py index 79a81155..c1049460 100644 --- a/tests/api/test_refactor_route.py +++ b/tests/api/test_refactor_route.py @@ -1,6 +1,5 @@ -# ruff: noqa: PT004 +# ruff: noqa: PT004, ARG001 import pytest - import 
shutil from pathlib import Path from typing import Any @@ -8,34 +7,38 @@ from fastapi.testclient import TestClient from unittest.mock import patch - from ecooptimizer.api.app import app +from ecooptimizer.api.error_handler import AppError +from ecooptimizer.api.routes.refactor_smell import perform_refactoring from ecooptimizer.analyzers.analyzer_controller import AnalyzerController +from ecooptimizer.data_types.custom_fields import Occurence +from ecooptimizer.data_types.smell import Smell from ecooptimizer.refactorers.refactorer_controller import RefactorerController client = TestClient(app) -SAMPLE_SMELL = { - "confidence": "UNKNOWN", - "message": "This is a message", - "messageId": "smellID", - "module": "module", - "obj": "obj", - "path": "fake_path.py", - "symbol": "smell-symbol", - "type": "type", - "occurences": [ - { - "line": 9, - "endLine": 999, - "column": 999, - "endColumn": 999, - } +SAMPLE_SMELL_MODEL = Smell( + confidence="UNKNOWN", + message="This is a message", + messageId="smellID", + module="module", + obj="obj", + path=str(Path("path/to/source_dir/fake_path.py").absolute()), + symbol="smell-symbol", + type="type", + occurences=[ + Occurence( + line=9, + endLine=999, + column=999, + endColumn=999, + ) ], -} +) -SAMPLE_SOURCE_DIR = "path\\to\\source_dir" +SAMPLE_SMELL = SAMPLE_SMELL_MODEL.model_dump() +SAMPLE_SOURCE_DIR = str(Path("path/to/source_dir").absolute()) @pytest.fixture(scope="module") @@ -43,115 +46,305 @@ def mock_dependencies() -> Generator[None, Any, None]: """Fixture to mock all dependencies for the /refactor route.""" with ( patch.object(Path, "is_dir"), + patch.object(Path, "exists"), patch.object(shutil, "copytree"), patch.object(shutil, "rmtree"), patch.object( RefactorerController, "run_refactorer", return_value=[ - Path("path/to/modified_file_1.py"), - Path("path/to/modified_file_2.py"), + Path("path/to/modified_file_1.py").absolute(), + Path("path/to/modified_file_2.py").absolute(), ], ), - patch.object(AnalyzerController, "run_analysis", return_value=[SAMPLE_SMELL]), - patch("tempfile.mkdtemp", return_value="/fake/temp/dir"), + patch.object(AnalyzerController, "run_analysis"), + patch( + "ecooptimizer.api.routes.refactor_smell.mkdtemp", + return_value="/fake/temp/dir", + ), ): yield -def test_refactor_success(mock_dependencies): # noqa: ARG001 - """Test the /refactor route with a successful refactoring process.""" - Path.is_dir.return_value = True # type: ignore +@pytest.fixture +def mock_refactor_success(): + """Fixture for successful refactor operations.""" + with ( + patch.object(Path, "is_dir", return_value=True), + patch.object(Path, "exists", return_value=True), + patch("ecooptimizer.api.routes.refactor_smell.measure_energy", side_effect=[10.0, 5.0]), + patch.object( + RefactorerController, + "run_refactorer", + return_value=[ + Path("path/to/modified_file_1.py").absolute(), + Path("path/to/modified_file_2.py").absolute(), + ], + ), + patch.object(Path, "relative_to", return_value=Path("fake_path.py")), + ): + yield - with patch("ecooptimizer.api.routes.refactor_smell.measure_energy", side_effect=[10.0, 5.0]): - request_data = { - "source_dir": SAMPLE_SOURCE_DIR, - "smell": SAMPLE_SMELL, - } - response = client.post("/refactor", json=request_data) +def test_refactor_target_file_not_found(mock_dependencies): + """Test the /refactor route when the target file does not exist.""" + Path.exists.return_value = False # type: ignore - assert response.status_code == 200 - assert "refactoredData" in response.json() - assert "updatedSmells" in
response.json() - assert len(response.json()["updatedSmells"]) == 1 + request_data = { + "sourceDir": SAMPLE_SOURCE_DIR, + "smell": SAMPLE_SMELL, + } + response = client.post("/refactor", json=request_data) + + assert response.status_code == 404 + assert "File not found" in response.json()["detail"] -def test_refactor_source_dir_not_found(mock_dependencies): # noqa: ARG001 + +def test_refactor_source_dir_not_found(mock_dependencies): """Test the /refactor route when the source directory does not exist.""" + Path.exists.return_value = True # type: ignore Path.is_dir.return_value = False # type: ignore request_data = { - "source_dir": SAMPLE_SOURCE_DIR, + "sourceDir": SAMPLE_SOURCE_DIR, "smell": SAMPLE_SMELL, } response = client.post("/refactor", json=request_data) assert response.status_code == 404 - assert f"Directory {SAMPLE_SOURCE_DIR} does not exist" in response.json()["detail"] + assert "Folder not found" in response.json()["detail"] -def test_refactor_energy_not_saved(mock_dependencies): # noqa: ARG001 +@patch("ecooptimizer.api.routes.refactor_smell.measure_energy", side_effect=[10.0, 15.0]) +def test_refactor_energy_not_saved(mock_measure, mock_dependencies, mock_refactor_success): """Test the /refactor route when no energy is saved after refactoring.""" - Path.is_dir.return_value = True # type: ignore - - with patch("ecooptimizer.api.routes.refactor_smell.measure_energy", side_effect=[10.0, 15.0]): - request_data = { - "source_dir": SAMPLE_SOURCE_DIR, - "smell": SAMPLE_SMELL, - } + request_data = { + "sourceDir": SAMPLE_SOURCE_DIR, + "smell": SAMPLE_SMELL, + } - response = client.post("/refactor", json=request_data) + response = client.post("/refactor", json=request_data) - assert response.status_code == 400 - assert "Energy was not saved" in response.json()["detail"] + assert response.status_code == 400 + assert "Energy was not saved" in response.json()["detail"] -def test_refactor_initial_energy_not_retrieved(mock_dependencies): # noqa: ARG001 +@patch("ecooptimizer.api.routes.refactor_smell.measure_energy", return_value=None) +def test_refactor_initial_energy_not_retrieved(mock_measure, mock_dependencies): - """Test the /refactor route when no energy is saved after refactoring.""" + """Test the /refactor route when the initial energy measurement cannot be retrieved.""" Path.is_dir.return_value = True # type: ignore + Path.exists.return_value = True # type: ignore - with patch("ecooptimizer.api.routes.refactor_smell.measure_energy", return_value=None): - request_data = { - "source_dir": SAMPLE_SOURCE_DIR, - "smell": SAMPLE_SMELL, - } + request_data = { + "sourceDir": SAMPLE_SOURCE_DIR, + "smell": SAMPLE_SMELL, + } - response = client.post("/refactor", json=request_data) + response = client.post("/refactor", json=request_data) - assert response.status_code == 400 - assert "Could not retrieve initial emissions" in response.json()["detail"] + assert response.status_code == 400 + assert "Could not retrieve emissions" in response.json()["detail"] -def test_refactor_final_energy_not_retrieved(mock_dependencies): # noqa: ARG001 +@patch("ecooptimizer.api.routes.refactor_smell.measure_energy", side_effect=[10.0, None]) +def test_refactor_final_energy_not_retrieved(mock_measure, mock_dependencies): - """Test the /refactor route when no energy is saved after refactoring.""" + """Test the /refactor route when the final energy measurement cannot be retrieved.""" Path.is_dir.return_value = True # type: ignore - with patch("ecooptimizer.api.routes.refactor_smell.measure_energy", side_effect=[10.0, None]): - request_data = { - "source_dir": SAMPLE_SOURCE_DIR, - "smell": SAMPLE_SMELL, - } -
response = client.post("/refactor", json=request_data) + response = client.post("/refactor", json=request_data) - assert response.status_code == 400 - assert "Could not retrieve final emissions" in response.json()["detail"] + assert response.status_code == 400 + assert "Could not retrieve emissions" in response.json()["detail"] -def test_refactor_unexpected_error(mock_dependencies): # noqa: ARG001 +@patch("ecooptimizer.api.routes.refactor_smell.measure_energy", return_value=10.0) +def test_refactor_unexpected_error(mock_measure, mock_dependencies): """Test the /refactor route when an unexpected error occurs during refactoring.""" Path.is_dir.return_value = True # type: ignore RefactorerController.run_refactorer.side_effect = Exception("Mock error") # type: ignore - with patch("ecooptimizer.api.routes.refactor_smell.measure_energy", return_value=10.0): - request_data = { - "source_dir": SAMPLE_SOURCE_DIR, - "smell": SAMPLE_SMELL, - } + request_data = { + "sourceDir": SAMPLE_SOURCE_DIR, + "smell": SAMPLE_SMELL, + } + + response = client.post("/refactor", json=request_data) + + assert response.status_code == 500 + assert "Mock error" == response.json()["detail"] + + +def test_refactor_success(mock_dependencies, mock_refactor_success): + """Test the /refactor route with a successful refactoring process.""" + request_data = { + "sourceDir": SAMPLE_SOURCE_DIR, + "smell": SAMPLE_SMELL, + } + + response = client.post("/refactor", json=request_data) + + assert response.status_code == 200 + assert set(response.json().keys()) == { + "tempDir", + "targetFile", + "energySaved", + "affectedFiles", + } + + +@patch("ecooptimizer.api.routes.refactor_smell.measure_energy", side_effect=[15, 10, 8]) +@patch.object(AnalyzerController, "run_analysis") +def test_refactor_by_type_success( + mock_run_analysis, mock_measure, mock_dependencies, mock_refactor_success +): + """Test the /refactor-by-type endpoint with successful refactoring.""" + mock_run_analysis.side_effect = [[SAMPLE_SMELL_MODEL], []] + request_data = { + "sourceDir": SAMPLE_SOURCE_DIR, + "smellType": "type", + "firstSmell": SAMPLE_SMELL, + } + + response = client.post("/refactor-by-type", json=request_data) + + assert response.status_code == 200 + assert set(response.json().keys()) == { + "tempDir", + "targetFile", + "energySaved", + "affectedFiles", + } + assert response.json()["energySaved"] == 7 + + +@patch("ecooptimizer.api.routes.refactor_smell.measure_energy", side_effect=[15, 10, 8, 6]) +@patch.object(AnalyzerController, "run_analysis") +def test_refactor_by_type_multiple_smells( + mock_run_analysis, mock_measure, mock_dependencies, mock_refactor_success +): + """Test /refactor-by-type with multiple smells of same type.""" + mock_run_analysis.side_effect = [[SAMPLE_SMELL_MODEL], [SAMPLE_SMELL_MODEL], []] + request_data = { + "sourceDir": SAMPLE_SOURCE_DIR, + "smellType": "type", + "firstSmell": SAMPLE_SMELL, + } + + response = client.post("/refactor-by-type", json=request_data) + + assert response.status_code == 200 + assert response.json()["energySaved"] == 9.0 + + +@patch("ecooptimizer.api.routes.refactor_smell.measure_energy", return_value=None) +def test_refactor_by_type_initial_energy_failure( + mock_measure, mock_dependencies, mock_refactor_success +): + """Test /refactor-by-type when initial energy measurement fails.""" + Path.exists.return_value = True # type: ignore + Path.is_dir.return_value = True # type: ignore - response = client.post("/refactor", json=request_data) + request_data = { + "sourceDir": SAMPLE_SOURCE_DIR, + 
"smellType": "type", + "firstSmell": SAMPLE_SMELL, + } + + response = client.post("/refactor-by-type", json=request_data) + + assert response.status_code == 400 + assert "Could not retrieve emissions" in response.json()["detail"] + + +@patch.object(Path, "is_dir", return_value=False) +def test_refactor_by_type_source_dir_not_found(mock_isdir): + """Test /refactor-by-type when source directory doesn't exist.""" + Path.exists.return_value = True # type: ignore + + request_data = { + "sourceDir": SAMPLE_SOURCE_DIR, + "smellType": "type", + "firstSmell": SAMPLE_SMELL, + } + + response = client.post("/refactor-by-type", json=request_data) + + assert response.status_code == 404 + assert "Folder not found" in response.json()["detail"] + + +@patch.object(RefactorerController, "run_refactorer") +def test_refactor_by_type_refactoring_error( + mock_run_refactor, + mock_dependencies, + mock_refactor_success, +): + """Test /refactor-by-type when refactoring fails.""" + mock_run_refactor.side_effect = AppError("Refactoring failed") + request_data = { + "sourceDir": SAMPLE_SOURCE_DIR, + "smellType": "type", + "firstSmell": SAMPLE_SMELL, + } + + response = client.post("/refactor-by-type", json=request_data) + + assert response.status_code == 500 + assert "Refactoring failed" in response.json()["detail"] + + +@patch("ecooptimizer.api.routes.refactor_smell.measure_energy", side_effect=[10.0, 15.0]) +def test_refactor_by_type_no_energy_saved(mock_measure, mock_dependencies, mock_refactor_success): + """Test /refactor-by-type when no energy is saved.""" + request_data = { + "sourceDir": SAMPLE_SOURCE_DIR, + "smellType": "type", + "firstSmell": SAMPLE_SMELL, + } - assert response.status_code == 400 - assert "Mock error" in response.json()["detail"] + response = client.post("/refactor-by-type", json=request_data) + + assert response.status_code == 400 + assert "Energy was not saved" in response.json()["detail"] + + +@patch("ecooptimizer.api.routes.refactor_smell.measure_energy", side_effect=[5.0]) +@patch.object(RefactorerController, "run_refactorer", return_value=[Path("modified_file.py")]) +@patch("shutil.copytree") +@patch("ecooptimizer.api.routes.refactor_smell.mkdtemp", return_value="/fake/temp/dir") +def test_perform_refactoring_success( + mock_mkdtemp, mock_copytree, mock_run_refactorer, mock_measure +): + """Test the perform_refactoring helper function.""" + source_dir = Path(SAMPLE_SOURCE_DIR) + smell = SAMPLE_SMELL_MODEL + result = perform_refactoring(source_dir, smell, 10.0) + + assert result.energySaved == 5.0 + mock_mkdtemp.assert_called_once_with(prefix="ecooptimizer-") + assert result.tempDir == str(Path("/fake/temp/dir")) + assert len(result.affectedFiles) == 1 + + +@patch("ecooptimizer.api.routes.refactor_smell.measure_energy", side_effect=[5]) +@patch.object(RefactorerController, "run_refactorer", return_value=[Path("modified_file.py")]) +@patch.object(shutil, "copytree") +def test_perform_refactoring_with_existing_temp_dir( + mock_copytree, mock_run_refactorer, mock_measure +): + """Test perform_refactoring with an existing temp directory.""" + source_dir = Path(SAMPLE_SOURCE_DIR) + smell = SAMPLE_SMELL_MODEL + existing_dir = Path("/existing/temp/dir") + result = perform_refactoring(source_dir, smell, 10.0, existing_dir) + + assert result.energySaved == 5.0 + assert result.tempDir == str(Path("/existing/temp/dir")) + assert len(result.affectedFiles) == 1 diff --git a/tests/benchmarking/test_code/1000_sample.py b/tests/benchmarking/test_code/1000_sample.py index bb59ba9d..552088e1 100644 --- 
a/tests/benchmarking/test_code/1000_sample.py +++ b/tests/benchmarking/test_code/1000_sample.py @@ -116,8 +116,7 @@ def do_god_knows_what(): mystring = "i hate capstone" n = 10 - for i in range(n): - b = 10 + for _ in range(n): mystring += "word" return n @@ -170,7 +169,6 @@ def __init__( def display_info(self): # Code Smell: Long Message Chain - random_test = self.make.split("") print( f"Make: {self.make}, Model: {self.model}, Year: {self.year}".upper().replace(",", "")[ ::2 @@ -234,31 +232,31 @@ def longestArithSeqLength5(A: list[int]) -> int: class Calculator: - def add(sum): + def add(self, sum): a = int(input("Enter number 1: ")) b = int(input("Enter number 2: ")) sum = a + b print("The addition of two numbers:", sum) - def mul(mul): + def mul(self, mul): a = int(input("Enter number 1: ")) b = int(input("Enter number 2: ")) mul = a * b print("The multiplication of two numbers:", mul) - def sub(sub): + def sub(self, sub): a = int(input("Enter number 1: ")) b = int(input("Enter number 2: ")) sub = a - b print("The subtraction of two numbers:", sub) - def div(div): + def div(self, div): a = int(input("Enter number 1: ")) b = int(input("Enter number 2: ")) div = a / b print("The division of two numbers: ", div) - def exp(exp): + def exp(self, exp): a = int(input("Enter number 1: ")) b = int(input("Enter number 2: ")) exp = a**b diff --git a/tests/benchmarking/test_code/250_sample.py b/tests/benchmarking/test_code/250_sample.py index d549d726..3871d37b 100644 --- a/tests/benchmarking/test_code/250_sample.py +++ b/tests/benchmarking/test_code/250_sample.py @@ -116,8 +116,7 @@ def do_god_knows_what(): mystring = "i hate capstone" n = 10 - for i in range(n): - b = 10 + for _ in range(n): mystring += "word" return n @@ -170,7 +169,6 @@ def __init__( def display_info(self): # Code Smell: Long Message Chain - random_test = self.make.split("") print( f"Make: {self.make}, Model: {self.model}, Year: {self.year}".upper().replace(",", "")[ ::2 diff --git a/tests/benchmarking/test_code/3000_sample.py b/tests/benchmarking/test_code/3000_sample.py index f8faab14..481be544 100644 --- a/tests/benchmarking/test_code/3000_sample.py +++ b/tests/benchmarking/test_code/3000_sample.py @@ -116,8 +116,7 @@ def do_god_knows_what(): mystring = "i hate capstone" n = 10 - for i in range(n): - b = 10 + for _ in range(n): mystring += "word" return n @@ -170,7 +169,6 @@ def __init__( def display_info(self): # Code Smell: Long Message Chain - random_test = self.make.split("") print( f"Make: {self.make}, Model: {self.model}, Year: {self.year}".upper().replace(",", "")[ ::2 @@ -234,31 +232,31 @@ def longestArithSeqLength5(A: list[int]) -> int: class Calculator: - def add(sum): + def add(self, sum): a = int(input("Enter number 1: ")) b = int(input("Enter number 2: ")) sum = a + b print("The addition of two numbers:", sum) - def mul(mul): + def mul(self, mul): a = int(input("Enter number 1: ")) b = int(input("Enter number 2: ")) mul = a * b print("The multiplication of two numbers:", mul) - def sub(sub): + def sub(self, sub): a = int(input("Enter number 1: ")) b = int(input("Enter number 2: ")) sub = a - b print("The subtraction of two numbers:", sub) - def div(div): + def div(self, div): a = int(input("Enter number 1: ")) b = int(input("Enter number 2: ")) div = a / b print("The division of two numbers: ", div) - def exp(exp): + def exp(self, exp): a = int(input("Enter number 1: ")) b = int(input("Enter number 2: ")) exp = a**b @@ -266,19 +264,19 @@ def exp(exp): class rootop: - def sqrt(): + def sqrt(self): a = 
int(input("Enter number 1: ")) b = int(input("Enter number 2: ")) print(math.sqrt(a)) print(math.sqrt(b)) - def cbrt(): + def cbrt(self): a = int(input("Enter number 1: ")) b = int(input("Enter number 2: ")) print(a ** (1 / 3)) print(b ** (1 / 3)) - def ranroot(): + def ranroot(self): a = int(input("Enter the x: ")) b = int(input("Enter the y: ")) b_div = 1 / b diff --git a/tests/controllers/test_analyzer_controller.py b/tests/controllers/test_analyzer_controller.py index e2d782dc..b6aa5b38 100644 --- a/tests/controllers/test_analyzer_controller.py +++ b/tests/controllers/test_analyzer_controller.py @@ -1,14 +1,42 @@ import textwrap import pytest -from unittest.mock import Mock +from unittest.mock import Mock, patch +from pathlib import Path + from ecooptimizer.analyzers.analyzer_controller import AnalyzerController +from ecooptimizer.analyzers.ast_analyzer import ASTAnalyzer from ecooptimizer.analyzers.ast_analyzers.detect_repeated_calls import detect_repeated_calls from ecooptimizer.data_types.custom_fields import CRCInfo, Occurence +from ecooptimizer.data_types.smell import Smell, CRCSmell +from ecooptimizer.data_types.smell_record import SmellRecord from ecooptimizer.refactorers.concrete.repeated_calls import CacheRepeatedCallsRefactorer -from ecooptimizer.refactorers.concrete.long_element_chain import LongElementChainRefactorer -from ecooptimizer.refactorers.concrete.list_comp_any_all import UseAGeneratorRefactorer -from ecooptimizer.refactorers.concrete.str_concat_in_loop import UseListAccumulationRefactorer -from ecooptimizer.data_types.smell import CRCSmell +from ecooptimizer.refactorers.base_refactorer import BaseRefactorer +from ecooptimizer.utils.smell_enums import CustomSmell + + +# Create proper mock refactorer classes with type parameters +class MockRefactorer(BaseRefactorer[CRCSmell]): + def refactor( + self, + target_file: Path, + source_dir: Path, + smell: CRCSmell, + output_file: Path, + overwrite: bool = True, + ): + pass + + +class MockGenericRefactorer(BaseRefactorer[Smell]): + def refactor( + self, + target_file: Path, + source_dir: Path, + smell: Smell, + output_file: Path, + overwrite: bool = True, + ): + pass @pytest.fixture @@ -24,7 +52,7 @@ def mock_crc_smell(): return CRCSmell( confidence="MEDIUM", message="Repeated function call detected (2/2). Consider caching the result: expensive_function(42)", - messageId="CRC001", + messageId=CustomSmell.CACHE_REPEATED_CALLS.value, module="main", obj=None, path="/path/to/test.py", @@ -38,7 +66,7 @@ def mock_crc_smell(): ) -def test_run_analysis_detects_crc_smell(mocker, mock_logger, tmp_path): +def test_run_analysis_detects_crc_smell(mocker, tmp_path): """Ensures the analyzer correctly detects CRC smells.""" test_file = tmp_path / "test.py" test_file.write_text( @@ -49,93 +77,105 @@ def test_case(): """) ) - mocker.patch( - "ecooptimizer.utils.smells_registry.retrieve_smell_registry", - return_value={ - "cached-repeated-calls": SmellRecord( - id="CRC001", - enabled=True, - analyzer_method="ast", - checker=detect_repeated_calls, - analyzer_options={"threshold": 2}, - refactorer=CacheRepeatedCallsRefactorer, - ) - }, + # Create a mock smell that would be returned by the analyzer + mock_smell = CRCSmell( + confidence="HIGH", + message="Repeated function call detected (2/2). 
Consider caching the result: expensive_function(42)", + messageId=CustomSmell.CACHE_REPEATED_CALLS.value, + module="test", + obj=None, + path=str(test_file), + symbol="cached-repeated-calls", + type="performance", + occurences=[ + Occurence(line=2, endLine=2, column=14, endColumn=36), + Occurence(line=3, endLine=3, column=14, endColumn=36), + ], + additionalInfo=CRCInfo(callString="expensive_function(42)", repetitions=2), ) - controller = AnalyzerController() - smells = controller.run_analysis(test_file) + # Mock the AST analyzer to return our mock smell + mock_ast_analyzer = mocker.patch.object(ASTAnalyzer, "analyze") + mock_ast_analyzer.return_value = [mock_smell] + + mock_registry = { + "cached-repeated-calls": SmellRecord( + id=CustomSmell.CACHE_REPEATED_CALLS.value, + enabled=True, + analyzer_method="ast", + checker=detect_repeated_calls, + analyzer_options={"threshold": 2}, + refactorer=CacheRepeatedCallsRefactorer, + ) + } + + with patch( + "ecooptimizer.utils.smells_registry.retrieve_smell_registry", return_value=mock_registry + ): + controller = AnalyzerController() + smells = controller.run_analysis(test_file, enabled_smells=["cached-repeated-calls"]) - print("Detected smells:", smells) - assert len(smells) == 1 - assert isinstance(smells[0], CRCSmell) - assert smells[0].additionalInfo.callString == "expensive_function(42)" - mock_logger.info.assert_any_call("⚠️ Detected Code Smells:") + assert len(smells) == 1 + assert isinstance(smells[0], Smell) + assert smells[0].symbol == "cached-repeated-calls" + assert smells[0].messageId == CustomSmell.CACHE_REPEATED_CALLS.value -def test_run_analysis_no_crc_smells_detected(mocker, mock_logger, tmp_path): +def test_run_analysis_no_crc_smells_detected(mocker, tmp_path): """Ensures the analyzer logs properly when no CRC smells are found.""" test_file = tmp_path / "test.py" test_file.write_text("print('No smells here')") - mocker.patch( - "ecooptimizer.utils.smells_registry.retrieve_smell_registry", - return_value={ - "cached-repeated-calls": SmellRecord( - id="CRC001", - enabled=True, - analyzer_method="ast", - checker=detect_repeated_calls, - analyzer_options={"threshold": 2}, - refactorer=CacheRepeatedCallsRefactorer, - ) - }, - ) - - controller = AnalyzerController() - smells = controller.run_analysis(test_file) + # Mock the AST analyzer to return no smells + mock_ast_analyzer = mocker.patch.object(ASTAnalyzer, "analyze") + mock_ast_analyzer.return_value = [] - assert smells == [] - mock_logger.info.assert_called_with("🎉 No code smells detected.") + mock_registry = { + "cached-repeated-calls": SmellRecord( + id=CustomSmell.CACHE_REPEATED_CALLS.value, + enabled=True, + analyzer_method="ast", + checker=detect_repeated_calls, + analyzer_options={"threshold": 2}, + refactorer=CacheRepeatedCallsRefactorer, + ) + } + with patch( + "ecooptimizer.utils.smells_registry.retrieve_smell_registry", return_value=mock_registry + ): + controller = AnalyzerController() + smells = controller.run_analysis(test_file, enabled_smells=["cached-repeated-calls"]) -from ecooptimizer.data_types.smell_record import SmellRecord + assert smells == [] def test_filter_smells_by_method(): """Ensures the method filters all types of smells correctly.""" mock_registry = { "cached-repeated-calls": SmellRecord( - id="CRC001", + id=CustomSmell.CACHE_REPEATED_CALLS.value, enabled=True, analyzer_method="ast", - checker=lambda x: x, - analyzer_options={}, + checker=detect_repeated_calls, + analyzer_options={"threshold": 2}, refactorer=CacheRepeatedCallsRefactorer, ), - 
"long-element-chain": SmellRecord( - id="LEC001", - enabled=True, - analyzer_method="ast", - checker=lambda x: x, - analyzer_options={}, - refactorer=LongElementChainRefactorer, - ), "use-a-generator": SmellRecord( id="R1729", enabled=True, analyzer_method="pylint", checker=None, analyzer_options={}, - refactorer=UseAGeneratorRefactorer, + refactorer=MockGenericRefactorer, ), "string-concat-loop": SmellRecord( id="SCL001", enabled=True, analyzer_method="astroid", - checker=lambda x: x, + checker=Mock(), analyzer_options={}, - refactorer=UseListAccumulationRefactorer, + refactorer=MockGenericRefactorer, ), } @@ -144,41 +184,64 @@ def test_filter_smells_by_method(): result_astroid = AnalyzerController.filter_smells_by_method(mock_registry, "astroid") assert "cached-repeated-calls" in result_ast - assert "long-element-chain" in result_ast assert "use-a-generator" in result_pylint assert "string-concat-loop" in result_astroid + assert len(result_ast) == 1 + assert len(result_pylint) == 1 + assert len(result_astroid) == 1 def test_generate_custom_options(): """Ensures AST and Astroid analysis options are generated correctly.""" mock_registry = { "cached-repeated-calls": SmellRecord( - id="CRC001", + id=CustomSmell.CACHE_REPEATED_CALLS.value, enabled=True, analyzer_method="ast", - checker=lambda x: x, - analyzer_options={}, + checker=detect_repeated_calls, + analyzer_options={"threshold": 2}, refactorer=CacheRepeatedCallsRefactorer, ), - "long-element-chain": SmellRecord( - id="LEC001", - enabled=True, - analyzer_method="ast", - checker=lambda x: x, - analyzer_options={}, - refactorer=LongElementChainRefactorer, - ), "string-concat-loop": SmellRecord( id="SCL001", enabled=True, analyzer_method="astroid", - checker=lambda x: x, + checker=Mock(), analyzer_options={}, - refactorer=UseListAccumulationRefactorer, + refactorer=MockGenericRefactorer, ), } + options = AnalyzerController.generate_custom_options(mock_registry) - assert len(options) == 3 - assert callable(options[0][0]) - assert callable(options[1][0]) - assert callable(options[2][0]) + assert len(options) == 2 + assert options[0][0] == detect_repeated_calls + assert options[0][1] == {"threshold": 2} + assert callable(options[1][0]) # Mock checker + assert options[1][1] == {} + + +def test_generate_pylint_options(): + """Ensures Pylint analysis options are generated correctly.""" + mock_registry = { + "use-a-generator": SmellRecord( + id="R1729", + enabled=True, + analyzer_method="pylint", + checker=None, + analyzer_options={}, + refactorer=MockGenericRefactorer, + ), + "too-many-arguments": SmellRecord( + id="R0913", + enabled=True, + analyzer_method="pylint", + checker=None, + analyzer_options={"max_args": {"flag": "--max-args", "value": 5}}, + refactorer=MockGenericRefactorer, + ), + } + + options = AnalyzerController.generate_pylint_options(mock_registry) + assert "--disable=all" in options + assert "--enable=use-a-generator,too-many-arguments" in options + assert any(opt.startswith("--max-args=") for opt in options) diff --git a/tests/controllers/test_refactorer_controller.py b/tests/controllers/test_refactorer_controller.py index 9d8222e8..c8706e4d 100644 --- a/tests/controllers/test_refactorer_controller.py +++ b/tests/controllers/test_refactorer_controller.py @@ -61,9 +61,7 @@ def test_run_refactorer_success(mocker, mock_refactorer_class, mock_logger, tmp_ # Assertions assert controller.smell_counters["LEC001"] == 1 - mock_logger.info.assert_called_once_with( - "🔄 Running refactoring for long-element-chain using TestRefactorer" - ) 
+ mock_logger.info.assert_called_once_with("🔄 Running TestRefactorer for long-element-chain") mock_instance.refactor.assert_called_once_with( target_file, source_dir, mock_smell, mocker.ANY, True ) @@ -82,10 +80,8 @@ def test_run_refactorer_no_refactorer(mock_logger, mocker, tmp_path, mock_smell) with pytest.raises(NotImplementedError) as exc_info: controller.run_refactorer(target_file, source_dir, mock_smell) - mock_logger.error.assert_called_once_with( - "❌ No refactorer found for smell: long-element-chain" - ) - assert "No refactorer implemented for smell: long-element-chain" in str(exc_info.value) + mock_logger.error.assert_called_once_with("❌ No refactorer for smell: long-element-chain") + assert "No refactorer for smell: long-element-chain" in str(exc_info.value) def test_run_refactorer_multiple_calls(mocker, mock_refactorer_class, tmp_path, mock_smell): diff --git a/tests/input/vehicle_management/__init__.py b/tests/input/vehicle_management/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/input/vehicle_management/requirements.txt b/tests/input/vehicle_management/requirements.txt new file mode 100644 index 00000000..e69de29b diff --git a/tests/input/vehicle_management/utils.py b/tests/input/vehicle_management/utils.py new file mode 100644 index 00000000..c10c674e --- /dev/null +++ b/tests/input/vehicle_management/utils.py @@ -0,0 +1,30 @@ +from datetime import datetime +from typing import Any + + +class Utility: + """ + General-purpose utility functions for the vehicle management system. + """ + + @staticmethod + def format_timestamp(ts: datetime | None = None) -> str: + """Returns a formatted timestamp.""" + if ts is None: + ts = datetime.now() + return ts.strftime("%Y-%m-%d %H:%M:%S") + + @staticmethod + def capitalize_words(text: str) -> str: + """Capitalize the first letter of each word in a string.""" + return " ".join(word.capitalize() for word in text.strip().split()) + + @staticmethod + def validate_positive_number(value: Any) -> bool: + """Checks if a value is a positive int or float.""" + return isinstance(value, (int, float)) and value > 0 + + @staticmethod + def safe_divide(numerator: float, denominator: float) -> float: + """Performs division and avoids ZeroDivisionError.""" + return numerator / denominator if denominator != 0 else 0.0 diff --git a/tests/input/vehicle_management/vehicles/__init__.py b/tests/input/vehicle_management/vehicles/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/input/vehicle_management/vehicles/car_models.py b/tests/input/vehicle_management/vehicles/car_models.py new file mode 100644 index 00000000..19095cfc --- /dev/null +++ b/tests/input/vehicle_management/vehicles/car_models.py @@ -0,0 +1,201 @@ +import math + + +class VehicleSpecification: + """Class representing detailed specifications of a vehicle.""" + + def __init__( + self, + engine_type: str, + horsepower: int, + torque: float, + fuel_efficiency: float, + acceleration: float, + top_speed: int, + weight: float, + drivetrain: str, + braking_distance: float, + safety_rating: str, + warranty_years: int = 3, + ): + self.engine_type = engine_type + self.horsepower = horsepower + self.torque = torque + self.fuel_efficiency = fuel_efficiency + self.acceleration = acceleration + self.top_speed = top_speed + self.weight = weight + self.drivetrain = drivetrain + self.braking_distance = braking_distance + self.safety_rating = safety_rating + self.warranty_years = warranty_years + self.spec_id = self._generate_spec_id() + + def
_generate_spec_id(self) -> str: + spec_id = "" + for attr in [self.engine_type, str(self.horsepower), self.drivetrain]: + spec_id += attr[:3].upper() + spec_id += "-" + return spec_id.rstrip("-") + + def _generate_alternate_id(self) -> str: + alt_id = "" + for attr in [self.engine_type, str(self.top_speed), self.safety_rating]: + alt_id = alt_id + attr[:2].lower() + alt_id = alt_id + "_" + return alt_id.rstrip("_") + + def validate_vehicle_attributes(self) -> bool: + return all([isinstance(attr, (str, int, float)) for attr in [self.engine_type, self.drivetrain]]) # type: ignore + + def get_technical_summary(self) -> str: + details = f"PERF: 0-60 in {self.acceleration}s | EFFICIENCY: {self.fuel_efficiency}mpg" + return details.upper().replace("|", "//").strip().lower().capitalize() + + def unused_spec_method(self): + print("This method doesn't use any instance attributes") + + +class ElectricVehicleSpec(VehicleSpecification): + """Specialization for electric vehicles.""" + + def __init__( + self, + engine_type: str, + horsepower: int, + torque: float, + fuel_efficiency: float, + acceleration: float, + top_speed: int, + weight: float, + drivetrain: str, + braking_distance: float, + safety_rating: str, + battery_capacity: float, + charge_time: float, + range_miles: int, + warranty_years: int = 5, + ): + super().__init__( + engine_type, + horsepower, + torque, + fuel_efficiency, + acceleration, + top_speed, + weight, + drivetrain, + braking_distance, + safety_rating, + warranty_years, + ) + self.battery_capacity = battery_capacity + self.charge_time = charge_time + self.range_miles = range_miles + self.charging_stations = [] + + def calculate_charging_cost(self, electricity_rate: float) -> float: + cost_calculator = lambda rate, capacity, efficiency=0.85, conversion=0.95: (rate * capacity * efficiency * conversion) # noqa: E731 + return cost_calculator(electricity_rate, self.battery_capacity) + + def format_specs(self): + processor = lambda x: str(x).strip().upper().replace(" ", "_") # noqa: E731 + return { + processor(key): processor(value) + for key, value in self.__dict__.items() + if not key.startswith('_') + } + + def is_high_performance(self) -> bool: + performance_score = 0 + for i in range(1, 50000): + performance_score += math.log(self.horsepower * i + 1) * math.sin(i / 1000.0) + + acceleration_factor = math.exp(-self.acceleration / 2) + top_speed_factor = math.sqrt(self.top_speed) + battery_weight_ratio = self.battery_capacity / self.weight + + score = performance_score * acceleration_factor * top_speed_factor * battery_weight_ratio + + return score > 1e6 + + +class EVUtility: + """Utility class for EV-related operations with a deeply nested structure.""" + + def __init__(self): + self.network_data = { + "stations": { + "NorthAmerica": { + "USA": { + "California": { + "SanFrancisco": { + "Downtown": { + "LotA": { + "port_1": {"status": "available"}, + "port_2": {"status": "charging"}, + } + } + } + } + } + } + } + } + + def get_deep_status(self): + return self.network_data["stations"]["NorthAmerica"]["USA"]["California"]["SanFrancisco"]["Downtown"]["LotA"]["port_2"]["status"] + + def get_partial_status(self): + return self.network_data["stations"]["NorthAmerica"]["USA"]["California"] + + +def create_tesla_model_s_spec(): + """Factory function for Tesla Model S specifications with clear repeated calls.""" + ev1 = ElectricVehicleSpec( + engine_type="Electric", horsepower=670, torque=1050, + fuel_efficiency=120, acceleration=2.3, top_speed=200, + weight=4600, drivetrain="AWD", 
braking_distance=133, + safety_rating="5-Star", battery_capacity=100, + charge_time=10, range_miles=405 + ) + ev2 = ElectricVehicleSpec( + engine_type="Manual", horsepower=465, torque=787, + fuel_efficiency=120, acceleration=2.3, top_speed=178, + weight=6969, drivetrain="AWD", braking_distance=76, + safety_rating="5-Star", battery_capacity=100, + charge_time=10, range_miles=405 + ) + + perf1 = ev1.is_high_performance() + perf2 = ev2.is_high_performance() + + range1 = ev1.range_miles + range2 = ev2.range_miles + + print(f"Performance checks: {perf1}, {perf2}") + print(f"Range values: {range1}, {range2}") + print(f"Second EV instance: {ev2}") + + if ev1.is_high_performance(): + print("High performance vehicle") + if ev1.is_high_performance(): + print("Confirmed high performance") + + # Long element chain example + utility = EVUtility() + deep_status = utility.network_data["stations"]["NorthAmerica"]["USA"]["California"]["SanFrancisco"]["Downtown"]["LotA"]["port_1"]["status"] + partial_info = utility.get_partial_status() + + print(f"Deeply nested port status: {deep_status}") + print(f"Partial station data: {partial_info}") + + return max( + ev1.calculate_charging_cost(0.15), + ev1.calculate_charging_cost(0.15) + ) + +if __name__ == "__main__": + print("Creating Tesla Model S Spec...") + max_cost = create_tesla_model_s_spec() + print(f"Max charging cost: ${max_cost:.2f}") diff --git a/tests/input/vehicle_management/vehicles/dealership.py b/tests/input/vehicle_management/vehicles/dealership.py new file mode 100644 index 00000000..5ca57fc8 --- /dev/null +++ b/tests/input/vehicle_management/vehicles/dealership.py @@ -0,0 +1,41 @@ +from car_models import VehicleSpecification + + +def manage_fleet(): + """ + Example function to demonstrate multiple code smells in a vehicle management context. 
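+    Included smells: a long lambda expression, a call to a method that never touches instance state, and string concatenation inside loops.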
+ """ + vehicle = VehicleSpecification( + engine_type="Hybrid", + horsepower=300, + torque=400.5, + fuel_efficiency=45.2, + acceleration=6.2, + top_speed=150, + weight=3200.0, + drivetrain="FWD", + braking_distance=120.5, + safety_rating="4-Star" + ) + + diagnostics = lambda a, b, c, d, e: ((a + b) * (c - d) / (e + 1) + (a * d) - (c ** 2) + (e * b - a / c) + a + b + c + d + e) # noqa: E731 + print("Running diagnostics:", diagnostics(1, 2, 3, 4, 5)) + + vehicle.unused_spec_method() + + status = "" + for i in range(5): + status += "Status-" + str(i) + "; " + print("Status Log:", status) + + report = {"summary": ""} + for i in range(3): + report["summary"] += f"Trip-{i}, " + print("Trip Summary:", report["summary"]) + + return vehicle.get_technical_summary() + + +if __name__ == "__main__": + summary = manage_fleet() + print("Vehicle Summary:", summary) diff --git a/tests/measurements/test_codecarbon_energy_meter.py b/tests/measurements/test_codecarbon_energy_meter.py index 0e2d9b6e..093a3bc2 100644 --- a/tests/measurements/test_codecarbon_energy_meter.py +++ b/tests/measurements/test_codecarbon_energy_meter.py @@ -1,92 +1,143 @@ +import math import pytest -import logging +from unittest.mock import patch, MagicMock from pathlib import Path -import subprocess import pandas as pd -from unittest.mock import patch -import sys +import subprocess from ecooptimizer.measurements.codecarbon_energy_meter import CodeCarbonEnergyMeter @pytest.fixture -def energy_meter(): - return CodeCarbonEnergyMeter() - - -@patch("codecarbon.EmissionsTracker.start") -@patch("codecarbon.EmissionsTracker.stop", return_value=0.45) -@patch("subprocess.run") -def test_measure_energy_success(mock_run, mock_stop, mock_start, energy_meter, caplog): - mock_run.return_value = subprocess.CompletedProcess( - args=["python3", "../input/project_car_stuff/main.py"], returncode=0 - ) - file_path = Path("../input/project_car_stuff/main.py") - with caplog.at_level(logging.INFO): - energy_meter.measure_energy(file_path) - - assert mock_run.call_count >= 1 - mock_run.assert_any_call( - [sys.executable, file_path], - capture_output=True, - text=True, - check=True, - ) - mock_start.assert_called_once() - mock_stop.assert_called_once() - assert "CodeCarbon measurement completed successfully." 
in caplog.text - assert energy_meter.emissions == 0.45 - - -@patch("codecarbon.EmissionsTracker.start") -@patch("codecarbon.EmissionsTracker.stop", return_value=0.45) -@patch("subprocess.run", side_effect=subprocess.CalledProcessError(1, "python3")) -def test_measure_energy_failure(mock_run, mock_stop, mock_start, energy_meter, caplog): - file_path = Path("../input/project_car_stuff/main.py") - with caplog.at_level(logging.ERROR): - energy_meter.measure_energy(file_path) - - mock_start.assert_called_once() - mock_run.assert_called_once() - mock_stop.assert_called_once() - assert "Error executing file" in caplog.text - assert ( - energy_meter.emissions_data is None - ) # since execution failed, emissions data should be None - - -@patch("pandas.read_csv") -@patch("pathlib.Path.exists", return_value=True) # mock file existence -def test_extract_emissions_csv_success(mock_exists, mock_read_csv, energy_meter): # noqa: ARG001 - # simulate DataFrame return value - mock_read_csv.return_value = pd.DataFrame( - [{"timestamp": "2025-03-01 12:00:00", "emissions": 0.45}] - ) - - csv_path = Path("dummy_path.csv") # fake path - result = energy_meter.extract_emissions_csv(csv_path) - - assert isinstance(result, dict) - assert "emissions" in result - assert result["emissions"] == 0.45 - - -@patch("pandas.read_csv", side_effect=Exception("File read error")) -@patch("pathlib.Path.exists", return_value=True) # mock file existence -def test_extract_emissions_csv_failure(mock_exists, mock_read_csv, energy_meter, caplog): # noqa: ARG001 - csv_path = Path("dummy_path.csv") # fake path - with caplog.at_level(logging.INFO): - result = energy_meter.extract_emissions_csv(csv_path) - - assert result is None # since reading the CSV fails, result should be None - assert "Error reading file" in caplog.text - - -@patch("pathlib.Path.exists", return_value=False) -def test_extract_emissions_csv_missing_file(mock_exists, energy_meter, caplog): # noqa: ARG001 - csv_path = Path("dummy_path.csv") # fake path - with caplog.at_level(logging.INFO): - result = energy_meter.extract_emissions_csv(csv_path) - - assert result is None # since file path does not exist, result should be None - assert "File 'dummy_path.csv' does not exist." 
in caplog.text +def mock_dependencies(): + """Fixture to mock all dependencies with proper subprocess mocking""" + with ( + patch("subprocess.run") as mock_subprocess, + patch("ecooptimizer.measurements.codecarbon_energy_meter.EmissionsTracker") as mock_tracker, + patch( + "ecooptimizer.measurements.codecarbon_energy_meter.TemporaryDirectory" + ) as mock_tempdir, + patch.object(Path, "exists") as mock_exists, + patch.object(CodeCarbonEnergyMeter, "_extract_emissions_data"), + ): + # Setup default successful subprocess mock + process_mock = MagicMock() + process_mock.returncode = 0 + mock_subprocess.return_value = process_mock + + # Setup tracker mock + tracker_instance = MagicMock() + mock_tracker.return_value = tracker_instance + + # Setup tempdir mock + mock_tempdir.return_value.__enter__.return_value = "/fake/temp/dir" + + mock_exists.return_value = True + + yield { + "subprocess": mock_subprocess, + "tracker": mock_tracker, + "tracker_instance": tracker_instance, + "tempdir": mock_tempdir, + "exists": mock_exists, + } + + +class TestCodeCarbonEnergyMeter: + @pytest.fixture + def meter(self): + return CodeCarbonEnergyMeter() + + def test_measure_energy_success(self, meter, mock_dependencies): + """Test successful measurement with float return value.""" + mock_dependencies["tracker_instance"].stop.return_value = 1.23 + + test_file = Path("test.py") + meter.measure_energy(test_file) + + assert meter.emissions == 1.23 + mock_dependencies["subprocess"].assert_called_once() + mock_dependencies["tracker_instance"].start.assert_called_once() + mock_dependencies["tracker_instance"].stop.assert_called_once() + + def test_measure_energy_none_return(self, meter, mock_dependencies): + """Test measurement that returns None.""" + mock_dependencies["tracker_instance"].stop.return_value = None + + test_file = Path("test.py") + meter.measure_energy(test_file) + + assert meter.emissions is None + mock_dependencies["tracker_instance"].stop.assert_called_once() + + def test_measure_energy_unexpected_return_type(self, meter, mock_dependencies, caplog): + """Test handling of unexpected return types.""" + mock_dependencies["tracker_instance"].stop.return_value = "invalid" + + test_file = Path("test.py") + meter.measure_energy(test_file) + + assert meter.emissions is None + assert "Unexpected emissions type" in caplog.text + mock_dependencies["tracker_instance"].stop.assert_called_once() + + def test_measure_energy_nan_return_type(self, meter, mock_dependencies, caplog): + """Test handling of NaN emissions values.""" + mock_dependencies["tracker_instance"].stop.return_value = math.nan + + test_file = Path("test.py") + meter.measure_energy(test_file) + + assert meter.emissions is None + assert "Unexpected emissions type" in caplog.text + mock_dependencies["tracker_instance"].stop.assert_called_once() + + def test_measure_energy_subprocess_failure( + self, meter, mock_dependencies: dict[str, MagicMock], caplog + ): + """Test handling of subprocess failures.""" + # Configure subprocess to raise error + mock_dependencies["subprocess"].side_effect = subprocess.CalledProcessError( + returncode=1, cmd=["python", "test.py"], output="Error output", stderr="Error details" + ) + mock_dependencies["tracker_instance"].stop.return_value = 1.23 + + test_file = Path("test.py") + meter.measure_energy(test_file) + + mock_dependencies["subprocess"].assert_called() + assert "Error executing file" in caplog.text + assert meter.emissions == 1.23 + + def test_extract_emissions_data_success(self, meter, tmp_path): + """Test
successful extraction of emissions data.""" + test_data = [ + {"timestamp": "2023-01-01", "emissions": 1.0}, + {"timestamp": "2023-01-02", "emissions": 2.0}, + ] + df = pd.DataFrame(test_data) + csv_path = tmp_path / "emissions.csv" + df.to_csv(csv_path, index=False) + + result = meter._extract_emissions_data(csv_path) + assert result == test_data[-1] + + def test_extract_emissions_data_failure(self, meter, tmp_path, caplog): + """Test failure to extract emissions data.""" + csv_path = tmp_path / "nonexistent.csv" + result = meter._extract_emissions_data(csv_path) + + assert result is None + assert "Failed to read emissions data" in caplog.text + + def test_measure_energy_missing_emissions_file(self, meter, mock_dependencies, caplog): + """Test handling when emissions file is missing.""" + mock_dependencies["tracker_instance"].stop.return_value = 1.23 + mock_dependencies["exists"].return_value = False + + with patch.object(Path, "exists", return_value=False): + test_file = Path("test.py") + meter.measure_energy(test_file) + + assert "Emissions file missing" in caplog.text + assert meter.emissions_data is None diff --git a/tests/refactorers/test_repeated_calls_refactor.py b/tests/refactorers/test_repeated_calls_refactor.py index 162d680d..2a5b23e0 100644 --- a/tests/refactorers/test_repeated_calls_refactor.py +++ b/tests/refactorers/test_repeated_calls_refactor.py @@ -185,8 +185,8 @@ def compute(self): def test_case(): demo1 = Demo(1) - cached_demo1_compute = demo1.compute() demo2 = Demo(2) + cached_demo1_compute = demo1.compute() result1 = cached_demo1_compute result2 = demo2.compute() result3 = cached_demo1_compute