diff --git a/cumulusci/cli/cci.py b/cumulusci/cli/cci.py
index adc50c49df..79443c1d54 100644
--- a/cumulusci/cli/cci.py
+++ b/cumulusci/cli/cci.py
@@ -20,6 +20,7 @@
 from .error import error
 from .flow import flow
+from .hash import hash_group
 from .logger import get_tempfile_logger, init_logger
 from .org import org
 from .plan import plan
@@ -242,3 +243,4 @@ def shell(runtime, script=None, python=None):
 cli.add_command(flow)
 cli.add_command(plan)
 cli.add_command(robot)
+cli.add_command(hash_group)
diff --git a/cumulusci/cli/flow.py b/cumulusci/cli/flow.py
index 96bd8db9cf..8979dc9fc2 100644
--- a/cumulusci/cli/flow.py
+++ b/cumulusci/cli/flow.py
@@ -1,13 +1,17 @@
 import json
+import os
+import yaml
 from collections import defaultdict
 from datetime import datetime
 from pathlib import Path

 import click

+from cumulusci.core.github import set_github_output
 from cumulusci.core.exceptions import FlowNotFoundError
 from cumulusci.core.utils import format_duration
 from cumulusci.utils import document_flow, flow_ref_title_and_intro
+from cumulusci.utils.hashing import hash_dict
 from cumulusci.utils.yaml.safer_loader import load_yaml_data

 from .runtime import pass_runtime
@@ -44,9 +48,9 @@ def flow_doc(runtime, project=False):
     flows_by_group = group_items(flows)
     flow_groups = sorted(
         flows_by_group.keys(),
-        key=lambda group: flow_info_groups.index(group)
-        if group in flow_info_groups
-        else 100,
+        key=lambda group: (
+            flow_info_groups.index(group) if group in flow_info_groups else 100
+        ),
     )

     for group in flow_groups:
@@ -183,3 +187,71 @@
         click.echo(str(e))

     runtime.alert(f"Flow Complete: {flow_name}")
+
+
+@flow.command(name="freeze", help="Freeze a flow into a flattened list of static steps")
+@click.argument("flow_name")
+@click.option(
+    "--org",
+    help="Specify the target org. By default, runs against the current default org",
+)
+@click.option(
+    "--debug", is_flag=True, help="Drops into pdb, the Python debugger, on an exception"
+)
+@click.option(
+    "-o",
+    nargs=2,
+    multiple=True,
+    help="Pass task specific options for the task as '-o taskname__option value'. You can specify more than one option by using -o more than once.",
+)
+@click.option(
+    "--no-prompt",
+    is_flag=True,
+    help="Disables all prompts. Set for non-interactive mode use such as calling from scripts or CI systems",
+)
+@pass_runtime(require_keychain=True)
+def flow_freeze(runtime, flow_name, org, debug, o, no_prompt=True):
+
+    # Get necessary configs
+    org, org_config = runtime.get_org(org)
+
+    # Parse command line options
+    options = defaultdict(dict)
+    if o:
+        for key, value in o:
+            if "__" in key:
+                task_name, option_name = key.split("__")
+                options[task_name][option_name] = value
+            else:
+                raise click.UsageError(
+                    "-o option for flows should contain __ to split task name from option name."
+                )
+
+    # Create the flow and handle initialization exceptions
+    try:
+        coordinator = runtime.get_flow(flow_name, options=options)
+        start_time = datetime.now()
+        steps = {}
+        for step in coordinator.freeze(org_config):
+            stepnum = len(steps)
+            steps[stepnum] = step
+
+        steps_hash = hash_dict(steps)
+        duration = datetime.now() - start_time
+        click.echo(f"Froze {flow_name} in {format_duration(duration)}")
+        frozen_name = f"{flow_name}__{steps_hash}"
+        filename = f"{frozen_name}.yml"
+        frozen_flow = coordinator.flow_config.config
+        frozen_flow["description"] = (
+            f"Frozen version of {flow_name} with hash {steps_hash}"
+        )
+        frozen_flow["steps"] = steps
+        with open(filename, "w") as f:
+            yaml.dump({"flows": {frozen_name: frozen_flow}}, f)
+        set_github_output("FLOW_FILENAME", filename)
+        click.echo(f"Frozen flow saved to {filename}")
+    except Exception:
+        runtime.alert(f"Flow error: {flow_name}")
+        raise
+
+    runtime.alert(f"Flow Complete: {flow_name}")
diff --git a/cumulusci/cli/hash.py b/cumulusci/cli/hash.py
new file mode 100644
index 0000000000..d24475f3d8
--- /dev/null
+++ b/cumulusci/cli/hash.py
@@ -0,0 +1,104 @@
+import click
+import hashlib
+import json
+import os
+from cumulusci.core.dependencies.resolvers import get_static_dependencies
+from cumulusci.core.utils import process_list_arg
+from cumulusci.core.github import set_github_output
+from cumulusci.utils.hashing import hash_dict
+from pydantic import BaseModel
+from .runtime import pass_runtime
+
+
+@click.group(
+    "hash",
+    help="Commands for hashing parts of the project's CumulusCI configuration and state",
+)
+def hash_group():
+    pass
+
+
+# Commands for group: hash
+
+
+@hash_group.command(
+    name="config",
+    help="Hashes all or part of the project's merged CumulusCI configuration",
+)
+@pass_runtime(require_project=True, require_keychain=False)  # maybe not needed...
+@click.option(
+    "--locators",
+    "locators",
+    help="A comma separated list of CumulusCI config locators to specify the top level of config key(s) to hash. Example: project__package,flows__ci_beta",
+)
+def hash_config(
+    runtime,
+    locators,
+):
+    locators_str = " for {}".format(locators) if locators else ""
+    locators = process_list_arg(locators)
+    config = runtime.project_config.config
+    if locators:
+        config = {loc: runtime.project_config.lookup(loc) for loc in locators}
+    config_hash = hash_dict(config)
+    click.echo(f"Hash of CumulusCI Config{locators_str}:")
+    click.echo(config_hash)
+    output_name = "HASH_CONFIG"
+    if locators:
+        output_name += "__" + "__AND__".join(locators)
+    set_github_output(output_name, config_hash)
+
+
+@hash_group.command(
+    name="flow",
+    help="Hashes a flow's configuration, either dynamic or frozen as a flat list of static steps",
+)
+@pass_runtime(require_project=True, require_keychain=False)  # maybe not needed...
+@click.argument("flow_name")
+@click.option(
+    "--freeze",
+    is_flag=True,
+    help="Freeze the flow configuration as a flat list of static steps",
+)
+def hash_flow(
+    runtime,
+    flow_name,
+    freeze,
+):
+    flow = runtime.get_flow(flow_name)
+
+    steps = flow.steps
+    if freeze:
+        steps = flow.freeze(org_config=None)
+    config_hash = hash_dict(steps)
+    click.echo(f"Hash of flow {flow_name}:")
+    click.echo(config_hash)
+    output_name = "HASH_FLOW__" + flow_name
+    if freeze:
+        output_name += "__FROZEN"
+    set_github_output(output_name, config_hash)
+
+
+@hash_group.command(
+    name="dependencies",
+    help="Resolve and hash the project's current dependencies",
+)
+@click.option(
+    "--resolution-strategy",
+    help="The resolution strategy to use. Defaults to production.",
+    default="production",
+)
+@pass_runtime(require_keychain=True)
+def hash_dependencies(runtime, resolution_strategy):
+    resolved = get_static_dependencies(
+        runtime.project_config,
+        resolution_strategy=resolution_strategy,
+    )
+    dependencies = []
+    for dependency in resolved:
+        click.echo(dependency)
+        dependencies.append(dependency.dict())
+
+    deps_hash = hash_dict(dependencies)
+    click.echo(f"Hash of CumulusCI Dependencies for {resolution_strategy}:")
+    click.echo(deps_hash)
diff --git a/cumulusci/core/flowrunner.py b/cumulusci/core/flowrunner.py
index 098e84ba4d..e83a2aa403 100644
--- a/cumulusci/core/flowrunner.py
+++ b/cumulusci/core/flowrunner.py
@@ -78,6 +78,7 @@
     FlowInfiniteLoopError,
     TaskImportError,
 )
+from cumulusci.utils import cd
 from cumulusci.utils.version_strings import LooseVersion

 if TYPE_CHECKING:
@@ -459,6 +460,28 @@ def get_flow_steps(

         return lines

+    def freeze(self, org_config) -> List[StepSpec]:
+        self.org_config = org_config
+        line = f"Initializing flow for freezing: {self.__class__.__name__}"
+        if self.name:
+            line = f"{line} ({self.name})"
+        self._rule()
+        self.logger.info(line)
+        self.logger.info(self.flow_config.description)
+        self._rule(new_line=True)
+        steps = []
+        for step in self.steps:
+            if step.skip:
+                continue
+            with cd(step.project_config.repo_root):
+                task = step.task_class(
+                    step.project_config,
+                    TaskConfig(step.task_config),
+                    name=step.task_name,
+                )
+                steps.extend(task.freeze(step))
+        return steps
+
     def run(self, org_config: OrgConfig):
         self.org_config = org_config
         line = f"Initializing flow: {self.__class__.__name__}"
diff --git a/cumulusci/core/github.py b/cumulusci/core/github.py
index eb5732a805..50334833e0 100644
--- a/cumulusci/core/github.py
+++ b/cumulusci/core/github.py
@@ -603,7 +603,7 @@ def catch_common_github_auth_errors(func: Callable) -> Callable:
     def inner(*args, **kwargs):
         try:
             return func(*args, **kwargs)
-        except (ConnectionError) as exc:
+        except ConnectionError as exc:
             if error_msg := format_github3_exception(exc):
                 raise GithubApiError(error_msg) from exc
             else:
@@ -663,3 +663,13 @@ def create_gist(github, description, files):
     files - A dict of files in the form of {filename:{'content': content},...}
     """
     return github.create_gist(description, files, public=False)
+
+
+# Utils for GitHub Actions worker environments
+def set_github_output(name: str, value: str):
+    """Set an output parameter for the GitHub Actions runner."""
+    github_output = os.environ.get("GITHUB_OUTPUT")
+    if not github_output:
+        return
+    with open(github_output, "a") as f:
+        f.write(f"{name}={value}\n")
diff --git a/cumulusci/utils/hashing.py b/cumulusci/utils/hashing.py
new file mode 100644
index 0000000000..97f903fdfe
--- /dev/null
+++ b/cumulusci/utils/hashing.py
@@ -0,0 +1,31 @@
+import hashlib
+import json
+from pydantic import BaseModel
+
+
+def cci_json_encoder(obj):
+    if isinstance(obj, BaseModel):
+        return obj.dict()
+    if hasattr(obj, "task_config"):
+        if obj.skip:
+            return None
+        return obj.task_config
+    # Fallback to default encoder
+    try:
+        return json.JSONEncoder().default(obj)
+    except TypeError:
+        raise TypeError(
+            f"Object of type {obj.__class__.__name__} is not JSON serializable"
+        )
+
+
+def hash_dict(dictionary):
+    # Step 1: Serialize the dictionary in a sorted order to ensure consistency
+    serialized_dict = json.dumps(
+        dictionary, sort_keys=True, default=cci_json_encoder
+    ).encode("utf-8")
+
+    # Step 2: Create an MD5 hash of the serialized dictionary
+    md5_hash = hashlib.md5(serialized_dict).hexdigest()
+
+    return md5_hash[:8]
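
Rough usage sketch (not part of the patch): this assumes the hash_dict helper added in cumulusci/utils/hashing.py above. Because serialization uses sort_keys=True, two configs that differ only in key order should yield the same truncated 8-character MD5 digest, which is what makes the hash usable as a stable suffix for frozen flow filenames and as a GitHub Actions output value.

# Illustrative sketch only -- assumes hash_dict as defined in cumulusci/utils/hashing.py above.
from cumulusci.utils.hashing import hash_dict

config_a = {"project": {"package": {"name": "Demo", "api_version": "58.0"}}}
config_b = {"project": {"package": {"api_version": "58.0", "name": "Demo"}}}

# sort_keys=True inside hash_dict means key order does not affect the digest
assert hash_dict(config_a) == hash_dict(config_b)
print(hash_dict(config_a))  # an 8-character hex string, e.g. "3f2c9a1b"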