Sync from dev @ 497baf0
Source: main (497baf0). Excluded: live tenant exports, generated artifacts, and dev-only tooling.
@@ -150,7 +150,8 @@ def _fetch_directory_audits(
         "$top": "999",
         "$select": "activityDateTime,activityDisplayName,category,result,initiatedBy,targetResources",
     }
-    filter_parts = [f"activityDateTime le {_format_filter_datetime(backup_start)}"]
+    audit_end = backup_start - dt.timedelta(minutes=10)
+    filter_parts = [f"activityDateTime le {_format_filter_datetime(audit_end)}"]
     if last_commit_date is not None:
         filter_parts.append(f"activityDateTime ge {_format_filter_datetime(last_commit_date)}")
     params["$filter"] = " and ".join(filter_parts)
@@ -114,6 +114,15 @@ def request_json(
         except urllib.error.HTTPError as exc:
             last_error = exc
             if exc.code not in retry_codes or attempt == max_retries:
+                body = ""
+                try:
+                    body = exc.read().decode("utf-8", errors="replace")[:2048]
+                except Exception:
+                    pass
+                if body:
+                    raise RuntimeError(
+                        f"{method} {url} failed: HTTP Error {exc.code}: {exc.reason} — {body}"
+                    ) from exc
                 raise
             retry_after = _get_retry_after_seconds(exc)
             sleep = retry_after if retry_after is not None else (2 ** attempt)
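The added branch surfaces the HTTP error body before giving up. For context, a minimal sketch of the retry loop this branch lives in — the loop itself is not part of the hunk, so everything outside the `except` body is an assumption based on the context lines (`retry_codes`, `max_retries`, `_get_retry_after_seconds`):

import time
import urllib.error
import urllib.request

def _get_retry_after_seconds(exc: urllib.error.HTTPError) -> int | None:
    # Assumed helper: honor the Retry-After header when the server sends one.
    value = exc.headers.get("Retry-After") if exc.headers else None
    return int(value) if value and value.isdigit() else None

def fetch_with_retries(url: str, retry_codes=(429, 503), max_retries: int = 3) -> bytes:
    last_error: Exception | None = None
    for attempt in range(max_retries + 1):
        try:
            with urllib.request.urlopen(url, timeout=30) as resp:
                return resp.read()
        except urllib.error.HTTPError as exc:
            last_error = exc
            if exc.code not in retry_codes or attempt == max_retries:
                raise  # mirrors the hunk: non-retryable status or out of attempts
            retry_after = _get_retry_after_seconds(exc)
            # Server-provided delay wins; otherwise exponential backoff.
            time.sleep(retry_after if retry_after is not None else 2 ** attempt)
    raise RuntimeError("unreachable") from last_error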
@@ -325,28 +325,10 @@ def _current_pr_merge_strategy(pr: dict[str, Any]) -> str:

 def _build_description(workload: str, drift_branch: str, baseline_branch: str, build_number: str, build_id: str) -> str:
     is_entra = workload.lower() == "entra"
-    lead = "Rolling Entra drift PR created by backup pipeline." if is_entra else "Rolling drift PR created by backup pipeline."
+    lead = "Rolling Entra drift PR — backup pipeline" if is_entra else "Rolling drift PR — backup pipeline"
     return (
-        f"{lead}\n\n"
-        f"- Source branch: `{drift_branch}`\n"
-        f"- Target branch: `{baseline_branch}`\n"
-        f"- Last pipeline run: `{build_number}` (BuildId: {build_id})\n\n"
-        "The automated review summary is generated immediately after PR creation and inserted "
-        "above the reviewer actions section.\n\n"
-        "## Reviewer Quick Actions\n\n"
-        "### 1) Accept all changes\n"
-        "- Merge PR to accept drift into baseline.\n\n"
-        "### 2) Reject whole PR and revert\n"
-        "- Set reviewer vote to **Reject**.\n"
-        "- Abandon PR.\n"
-        "- Auto-remediation queues restore (if `AUTO_REMEDIATE_ON_PR_REJECTION=true`).\n\n"
-        "### 3) Reject only selected policy changes\n"
-        "- In each `Change Needed` policy thread, comment `/reject` for changes you do not want.\n"
-        "- Optional: use `/accept` for changes you want to keep.\n"
-        "- Wait for review-sync pipeline (about 5 minutes) to update PR diff.\n"
-        "- Merge remaining accepted changes.\n"
-        "- Post-merge auto-remediation queues restore to reconcile tenant to merged baseline "
-        "(if `AUTO_REMEDIATE_AFTER_MERGE=true`)."
+        f"{lead} run `{build_number}` (build {build_id})\n\n"
+        f"Source: `{drift_branch}` → Target: `{baseline_branch}`\n"
     )
102  scripts/filter_intune_formatting_noise.py  Normal file
@@ -0,0 +1,102 @@
#!/usr/bin/env python3
"""Revert Intune JSON exports that differ from baseline only in formatting or key ordering."""

from __future__ import annotations

import argparse
import json
import subprocess
import sys
from pathlib import Path


def _run_git_show(repo_root: Path, ref: str, rel_path: str) -> str | None:
    proc = subprocess.run(
        ["git", "show", f"{ref}:{rel_path}"],
        cwd=str(repo_root),
        check=False,
        capture_output=True,
    )
    if proc.returncode != 0:
        return None
    return proc.stdout.decode("utf-8", errors="replace")


def revert_formatting_only_changes(
    repo_root: Path,
    backup_root: Path,
    baseline_ref: str,
) -> tuple[list[str], list[str]]:
    reverted: list[str] = []
    kept: list[str] = []

    for file_path in sorted(backup_root.rglob("*.json")):
        rel_path = file_path.relative_to(repo_root).as_posix()
        baseline_text = _run_git_show(repo_root, baseline_ref, rel_path)
        if not baseline_text:
            # New file — nothing to revert against
            continue

        try:
            current_text = file_path.read_text(encoding="utf-8")
            current_payload = json.loads(current_text)
            baseline_payload = json.loads(baseline_text)
        except Exception:
            kept.append(rel_path)
            continue

        if current_payload == baseline_payload:
            file_path.write_text(baseline_text, encoding="utf-8")
            reverted.append(rel_path)
        else:
            kept.append(rel_path)

    return reverted, kept


def main() -> int:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--repo-root", required=True)
    parser.add_argument(
        "--backup-root",
        default="tenant-state/intune",
        help="Path to Intune backup root (default: tenant-state/intune).",
    )
    parser.add_argument(
        "--baseline-ref",
        default="HEAD",
        help="Git ref used as baseline for comparison (default: HEAD).",
    )
    args = parser.parse_args()

    repo_root = Path(args.repo_root).resolve()
    backup_root = Path(args.backup_root)
    if not backup_root.is_absolute():
        backup_root = repo_root / backup_root
    backup_root = backup_root.resolve()

    if not backup_root.exists():
        print(f"Backup root not found: {backup_root}")
        return 0

    reverted, kept = revert_formatting_only_changes(
        repo_root=repo_root,
        backup_root=backup_root,
        baseline_ref=args.baseline_ref,
    )

    if reverted:
        print(f"Reverted {len(reverted)} formatting-only Intune JSON export(s) to baseline:")
        for path in reverted:
            print(f"  - {path}")
    else:
        print("No formatting-only Intune JSON exports detected.")

    if kept:
        print(f"Files with actual semantic changes (kept): {len(kept)}")

    return 0


if __name__ == "__main__":
    sys.exit(main())
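The payload-equality check works because json.loads produces plain dicts and lists, and dict equality ignores key order and whitespace. A quick illustration of the property the script relies on (sample strings are hypothetical):

import json

pretty  = '{\n  "displayName": "Baseline",\n  "enabled": true\n}'
compact = '{"enabled": true, "displayName": "Baseline"}'

# Different bytes, same payload: the script would revert this file to baseline.
assert pretty != compact
assert json.loads(pretty) == json.loads(compact)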
444  scripts/probe_tenant_changes.py  Normal file
@@ -0,0 +1,444 @@
#!/usr/bin/env python3
"""Probe tenant audit logs to detect configuration changes and decide whether to trigger a backup pipeline.

This script is designed to run inside an Azure Function timer trigger or locally for testing.
It queries Microsoft Graph audit endpoints for the cheapest possible signal that a configuration
change occurred since the last check, then applies a debouncer so that a burst of changes during
an admin sprint results in a single backup run after a configurable quiet window.

Usage (local testing):
    python3 scripts/probe_tenant_changes.py \
        --token "$GRAPH_TOKEN" \
        --state-path ./probe-state.json \
        --quiet-window-minutes 15 \
        --cooldown-minutes 30

Usage (Azure Function wrapper):
    python3 scripts/probe_tenant_changes.py \
        --token "$GRAPH_TOKEN" \
        --state-json '{"intune":{"last_check":"2026-04-20T10:00:00+00:00"},...}' \
        --quiet-window-minutes 15 \
        --cooldown-minutes 30
"""

from __future__ import annotations

import argparse
import datetime as dt
import json
import pathlib
import sys
import urllib.parse
import urllib.request
from typing import Any

# scripts/ is not guaranteed to be on PYTHONPATH when loaded by the Function wrapper,
# so we tolerate a relative import failure and fall back to an absolute import.
try:
    from scripts.common import request_json
except ImportError:
    from common import request_json  # type: ignore[no-redef]


# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------

_INTUNE_AUDIT_URL = "https://graph.microsoft.com/beta/deviceManagement/auditEvents"
_ENTRA_AUDIT_URL = "https://graph.microsoft.com/v1.0/auditLogs/directoryAudits"

# Target resource types in Entra that map to the categories exported by export_entra_baseline.py.
_ENTRA_TARGET_TYPES = (
    "ConditionalAccessPolicy",
    "NamedLocation",
    "AuthenticationStrengthPolicy",
    "Application",
    "ServicePrincipal",
)

_DEFAULT_STATE: dict[str, Any] = {
    "intune": {"last_check": None},
    "entra": {"last_check": None},
    "debouncer": {
        "state": "idle",
        "first_event_at": None,
        "trigger_after": None,
        "cooldown_until": None,
    },
}


# ---------------------------------------------------------------------------
# Token acquisition
# ---------------------------------------------------------------------------

def _acquire_graph_token(client_id: str, client_secret: str, tenant_id: str) -> str:
    """Acquire a Graph access token via client credentials flow."""
    url = f"https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token"
    body = urllib.parse.urlencode(
        {
            "client_id": client_id,
            "client_secret": client_secret,
            "scope": "https://graph.microsoft.com/.default",
            "grant_type": "client_credentials",
        }
    ).encode("utf-8")
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    req = urllib.request.Request(url, data=body, headers=headers, method="POST")
    with urllib.request.urlopen(req, timeout=30) as resp:
        payload = json.loads(resp.read().decode("utf-8"))
    access_token = payload.get("access_token")
    if not access_token:
        raise RuntimeError("Token endpoint did not return an access_token.")
    return str(access_token)


# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------

def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--token", default="", help="Microsoft Graph bearer token (direct).")
    parser.add_argument("--client-id", default="", help="Entra app client ID (alternative to --token).")
    parser.add_argument("--client-secret", default="", help="Entra app client secret (alternative to --token).")
    parser.add_argument("--tenant-id", default="", help="Entra tenant ID (alternative to --token).")
    parser.add_argument(
        "--state-path",
        default="",
        help="Path to a local JSON state file (used for local testing).",
    )
    parser.add_argument(
        "--state-json",
        default="",
        help="Raw JSON state string (used when the caller manages persistence, e.g. Azure Table Storage).",
    )
    parser.add_argument(
        "--quiet-window-minutes",
        type=int,
        default=15,
        help="Minutes of silence after the last detected change before triggering a backup.",
    )
    parser.add_argument(
        "--cooldown-minutes",
        type=int,
        default=30,
        help="Minimum minutes between two triggered backup runs.",
    )
    parser.add_argument(
        "--now",
        default="",
        help="Override the current time (ISO 8601). Useful for tests.",
    )
    return parser.parse_args()


# ---------------------------------------------------------------------------
# State helpers
# ---------------------------------------------------------------------------

def _load_state(path: str, json_str: str) -> dict[str, Any]:
    if json_str:
        return json.loads(json_str)
    if path:
        p = pathlib.Path(path)
        if p.exists():
            return json.loads(p.read_text(encoding="utf-8"))
    return json.loads(json.dumps(_DEFAULT_STATE))


def _save_state(path: str, state: dict[str, Any]) -> None:
    if path:
        pathlib.Path(path).write_text(
            json.dumps(state, indent=2, ensure_ascii=False) + "\n",
            encoding="utf-8",
        )


def _parse_iso(value: str | None) -> dt.datetime | None:
    if not value:
        return None
    try:
        parsed = dt.datetime.fromisoformat(value.replace("Z", "+00:00"))
        return parsed.astimezone(dt.timezone.utc)
    except ValueError:
        return None


def _format_iso(value: dt.datetime) -> str:
    return value.astimezone(dt.timezone.utc).isoformat().replace("+00:00", "Z")


# ---------------------------------------------------------------------------
# Graph queries
# ---------------------------------------------------------------------------

def _build_intune_filter(since: dt.datetime, until: dt.datetime) -> str:
    since_str = since.strftime("%Y-%m-%dT%H:%M:%SZ")
    until_str = until.strftime("%Y-%m-%dT%H:%M:%SZ")
    return (
        f"activityDateTime ge {since_str}"
        f" and activityDateTime le {until_str}"
        f" and activityResult eq 'Success'"
        f" and activityOperationType ne 'Get'"
    )


def _build_entra_filter(since: dt.datetime, until: dt.datetime) -> str:
    since_str = since.strftime("%Y-%m-%dT%H:%M:%SZ")
    until_str = until.strftime("%Y-%m-%dT%H:%M:%SZ")
    type_clauses = " or ".join(
        f"targetResources/any(t: t/type eq '{t}')" for t in _ENTRA_TARGET_TYPES
    )
    return (
        f"activityDateTime ge {since_str}"
        f" and activityDateTime le {until_str}"
        f" and result eq 'success'"
        f" and ({type_clauses})"
    )


def _fetch_latest_event(url: str, token: str) -> dict[str, Any] | None:
    """Return the single latest matching audit event, or None if nothing found."""
    try:
        payload = request_json(url, token=token, timeout=30, max_retries=2)
    except Exception as exc:
        # Defensive: log and treat as no event so a transient Graph failure does
        # not wedge the debouncer in an armed state forever.
        print(f"Warning: Graph query failed ({exc})", file=sys.stderr)
        return None

    value = payload.get("value")
    if isinstance(value, list) and value:
        event = value[0]
        if isinstance(event, dict):
            return event
    return None


def _get_latest_intune_event(
    token: str, since: dt.datetime, until: dt.datetime
) -> dict[str, Any] | None:
    filter_str = _build_intune_filter(since, until)
    params = {
        "$filter": filter_str,
        "$orderby": "activityDateTime desc",
        "$top": "1",
        "$select": "id,activityDateTime,activityType,activityOperationType",
    }
    url = f"{_INTUNE_AUDIT_URL}?{urllib.parse.urlencode(params)}"
    return _fetch_latest_event(url, token)


def _get_latest_entra_event(
    token: str, since: dt.datetime, until: dt.datetime
) -> dict[str, Any] | None:
    filter_str = _build_entra_filter(since, until)
    params = {
        "$filter": filter_str,
        "$orderby": "activityDateTime desc",
        "$top": "1",
        "$select": "id,activityDateTime,activityDisplayName",
    }
    url = f"{_ENTRA_AUDIT_URL}?{urllib.parse.urlencode(params)}"
    return _fetch_latest_event(url, token)


# ---------------------------------------------------------------------------
# Debouncer
# ---------------------------------------------------------------------------

def _evaluate_debouncer(
    state: dict[str, Any],
    intune_event: dict[str, Any] | None,
    entra_event: dict[str, Any] | None,
    now: dt.datetime,
    quiet_window: dt.timedelta,
    cooldown: dt.timedelta,
) -> tuple[bool, dict[str, Any], str]:
    """Return (should_trigger, updated_state, human_readable_reason)."""

    deb = dict(state.get("debouncer") or {})
    deb_state = str(deb.get("state") or "idle")

    # Extract event timestamps if present
    intune_time: dt.datetime | None = None
    entra_time: dt.datetime | None = None
    if intune_event:
        intune_time = _parse_iso(intune_event.get("activityDateTime"))
    if entra_event:
        entra_time = _parse_iso(entra_event.get("activityDateTime"))

    latest_event_time = max(
        (t for t in (intune_time, entra_time) if t is not None), default=None
    )

    # ------------------------------------------------------------------
    # Cooldown check
    # ------------------------------------------------------------------
    if deb_state == "cooldown":
        cooldown_until = _parse_iso(deb.get("cooldown_until"))
        if cooldown_until is not None and now < cooldown_until:
            reason = (
                f"In cooldown until {_format_iso(cooldown_until)}; "
                f"{int(intune_event is not None) + int(entra_event is not None)} event(s) ignored."
            )
            return False, state, reason
        # Cooldown expired → fall through to idle logic
        deb = {
            "state": "idle",
            "first_event_at": None,
            "trigger_after": None,
            "cooldown_until": None,
        }
        deb_state = "idle"

    # ------------------------------------------------------------------
    # Idle or armed
    # ------------------------------------------------------------------
    if latest_event_time is None:
        # No changes in this window
        if deb_state == "armed":
            trigger_after = _parse_iso(deb.get("trigger_after"))
            if trigger_after is not None and now >= trigger_after:
                # Quiet window satisfied — fire
                deb = {
                    "state": "cooldown",
                    "first_event_at": None,
                    "trigger_after": None,
                    "cooldown_until": _format_iso(now + cooldown),
                }
                reason = "Quiet window satisfied; no new events since last check."
                state["debouncer"] = deb
                return True, state, reason
            # Still waiting
            reason = f"Armed, waiting for quiet window until {_format_iso(trigger_after)}."
            state["debouncer"] = deb
            return False, state, reason
        # Idle, no changes
        reason = "No changes detected."
        state["debouncer"] = deb
        return False, state, reason

    # There is at least one new event
    if deb_state == "idle":
        # First change in a while — arm the debouncer
        trigger_after = now + quiet_window
        deb = {
            "state": "armed",
            "first_event_at": _format_iso(latest_event_time),
            "trigger_after": _format_iso(trigger_after),
            "cooldown_until": None,
        }
        reason = (
            f"Change detected at {_format_iso(latest_event_time)}; "
            f"armed, trigger scheduled for {_format_iso(trigger_after)}."
        )
        state["debouncer"] = deb
        return False, state, reason

    if deb_state == "armed":
        # Extend the quiet window because activity is still ongoing
        trigger_after = now + quiet_window
        first_event = deb.get("first_event_at") or _format_iso(latest_event_time)
        deb = {
            "state": "armed",
            "first_event_at": first_event,
            "trigger_after": _format_iso(trigger_after),
            "cooldown_until": None,
        }
        workloads: list[str] = []
        if intune_event:
            workloads.append("intune")
        if entra_event:
            workloads.append("entra")
        reason = (
            f"Additional change detected at {_format_iso(latest_event_time)} "
            f"({'/'.join(workloads)}); quiet window extended to {_format_iso(trigger_after)}."
        )
        state["debouncer"] = deb
        return False, state, reason

    # Defensive fallback
    reason = f"Unexpected debouncer state '{deb_state}'; resetting to idle."
    state["debouncer"] = {
        "state": "idle",
        "first_event_at": None,
        "trigger_after": None,
        "cooldown_until": None,
    }
    return False, state, reason


# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

def main() -> int:
    args = parse_args()

    token = args.token.strip()
    if not token:
        if args.client_id and args.client_secret and args.tenant_id:
            token = _acquire_graph_token(args.client_id, args.client_secret, args.tenant_id)
        else:
            print(
                "ERROR: Provide --token, or all three of --client-id, --client-secret, --tenant-id.",
                file=sys.stderr,
            )
            raise SystemExit(1)

    quiet_window = dt.timedelta(minutes=args.quiet_window_minutes)
    cooldown = dt.timedelta(minutes=args.cooldown_minutes)

    now = _parse_iso(args.now) or dt.datetime.now(dt.timezone.utc)
    # Truncate to second for cleaner output
    now = now.replace(microsecond=0)

    state = _load_state(args.state_path, args.state_json)

    # Initialise missing last_check values to a safe default (24 hours ago).
    # This prevents a brand-new state file from scanning the entire audit log history.
    default_since = now - dt.timedelta(hours=24)
    intune_since = _parse_iso(state.get("intune", {}).get("last_check")) or default_since
    entra_since = _parse_iso(state.get("entra", {}).get("last_check")) or default_since

    # ------------------------------------------------------------------
    # Query Graph
    # ------------------------------------------------------------------
    intune_event = _get_latest_intune_event(token, intune_since, now)
    entra_event = _get_latest_entra_event(token, entra_since, now)

    # ------------------------------------------------------------------
    # Debounce
    # ------------------------------------------------------------------
    trigger, state, reason = _evaluate_debouncer(
        state, intune_event, entra_event, now, quiet_window, cooldown
    )

    # ------------------------------------------------------------------
    # Advance watermarks regardless of trigger decision so the next run
    # does not re-scan the same window.
    # ------------------------------------------------------------------
    state.setdefault("intune", {})["last_check"] = _format_iso(now)
    state.setdefault("entra", {})["last_check"] = _format_iso(now)

    _save_state(args.state_path, state)

    # ------------------------------------------------------------------
    # Emit decision
    # ------------------------------------------------------------------
    result = {
        "trigger": trigger,
        "reason": reason,
        "checked_at": _format_iso(now),
        "intune_event": intune_event,
        "entra_event": entra_event,
        "new_state": state,
    }
    print(json.dumps(result, indent=2, ensure_ascii=False))
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
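The debouncer is a three-state machine: idle → armed (quiet window open, extended by each new event) → cooldown after firing. A compressed walkthrough of the transitions with the default 15/30-minute settings and hypothetical timestamps — this restates the rules above rather than calling the script:

import datetime as dt

now = dt.datetime(2026, 4, 20, 10, 0, tzinfo=dt.timezone.utc)
quiet_window = dt.timedelta(minutes=15)
cooldown = dt.timedelta(minutes=30)

# 10:00 idle + event        -> armed, trigger_after = 10:15
trigger_after = now + quiet_window
# 10:10 armed + new event   -> still armed, trigger_after extended to 10:25
trigger_after = (now + dt.timedelta(minutes=10)) + quiet_window
# 10:25 armed + no events   -> trigger backup, cooldown_until = 10:55
cooldown_until = (now + dt.timedelta(minutes=25)) + cooldown
# 10:40 cooldown + event    -> ignored until 10:55, then back to idle
assert cooldown_until == dt.datetime(2026, 4, 20, 10, 55, tzinfo=dt.timezone.utc)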
86  scripts/trigger_backup_pipeline.py  Normal file
@@ -0,0 +1,86 @@
#!/usr/bin/env python3
"""Trigger an Azure DevOps pipeline run via REST API.

Intended to be invoked from the queue-consumer Azure Function or locally for testing.

Usage:
    python3 scripts/trigger_backup_pipeline.py \
        --organization "my-org" \
        --project "my-project" \
        --pipeline-id 123 \
        --token "$ADO_PAT" \
        --branch "main" \
        --parameters '{"forceFullRun": false}'
"""

from __future__ import annotations

import argparse
import base64
import json
import sys
from typing import Any

try:
    from scripts.common import request_json
except ImportError:
    from common import request_json  # type: ignore[no-redef]


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--organization", required=True)
    parser.add_argument("--project", required=True)
    parser.add_argument("--pipeline-id", type=int, required=True)
    parser.add_argument("--token", required=True, help="Azure DevOps PAT or OAuth token.")
    parser.add_argument("--branch", default="main", help="Git ref to run against.")
    parser.add_argument(
        "--parameters",
        default="{}",
        help='JSON object of pipeline template parameters (e.g. \'{"forceFullRun": true}\').',
    )
    return parser.parse_args()


def main() -> int:
    args = parse_args()

    base_url = (
        f"https://dev.azure.com/{args.organization}/{args.project}"
        f"/_apis/pipelines/{args.pipeline_id}/runs?api-version=7.1"
    )

    # removeprefix, not lstrip: lstrip would strip a character *set*, mangling branch names.
    branch = args.branch.removeprefix("refs/heads/")
    body: dict[str, Any] = {
        "resources": {
            "repositories": {
                "self": {"refName": f"refs/heads/{branch}"}
            }
        },
    }

    params = json.loads(args.parameters)
    if isinstance(params, dict) and params:
        body["templateParameters"] = params

    # ADO REST API accepts Basic auth with an empty username and the PAT as password.
    encoded = base64.b64encode(f":{args.token}".encode("utf-8")).decode("utf-8")
    auth_header = f"Basic {encoded}"

    print(f"Triggering pipeline {args.pipeline_id} on branch {args.branch} ...")
    response = request_json(
        base_url,
        method="POST",
        body=body,
        headers={"Authorization": auth_header},
        timeout=30,
        max_retries=2,
    )

    run_id = response.get("id")
    run_url = response.get("url")
    print(f"Queued run id={run_id} url={run_url}")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
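The original refName line used `args.branch.lstrip('refs/heads/')`, a classic bug: str.lstrip treats its argument as a set of characters to strip, not as a prefix. The cleaned script above uses removeprefix (Python 3.9+) instead; the difference in one snippet:

# lstrip strips any leading characters found in the set {r,e,f,s,/,h,a,d}
assert "refs/heads/dev".lstrip("refs/heads/") == "v"
# removeprefix strips the exact prefix, and is a no-op when it is absent
assert "refs/heads/dev".removeprefix("refs/heads/") == "dev"
assert "main".removeprefix("refs/heads/") == "main"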
@@ -54,6 +54,14 @@ TICKET_BLOCK_END = "<!-- AUTO-CHANGE-TICKETS:END -->"
 AUTO_TICKET_THREAD_PREFIX = "AUTO-CHANGE-TICKET:"
 AUTO_AI_REVIEW_THREAD_PREFIX = "AUTO-AI-REVIEW:"
 COMPACT_AI_THREAD_NOTE = "_Full AI reviewer narrative is posted in a dedicated PR thread due to PR description limits._"
+AUTO_DETERMINISTIC_THREAD_PREFIX = "AUTO-DETERMINISTIC-SUMMARY:"
+COMPACT_DETERMINISTIC_THREAD_NOTE = (
+    "_Full deterministic summary (including Top Risk Items) is posted in a dedicated PR thread "
+    "due to Azure DevOps description size limits._"
+)
+ADO_PR_DESCRIPTION_MAX_LEN = 4000
+AUTO_REVIEWER_GUIDE_THREAD_PREFIX = "AUTO-REVIEWER-GUIDE:"
+COMPACT_REVIEWER_GUIDE_NOTE = "> 📋 Full **reviewer guide** is posted in a dedicated PR thread."

 THREAD_STATUS_ACTIVE = 1
 THREAD_STATUS_FIXED = 2
@@ -2035,6 +2043,29 @@ def _compact_deterministic_summary(deterministic_summary: str) -> str:
     return deterministic_summary[:idx].strip()


+def _compact_reviewer_guide(description: str) -> str:
+    """Replace the legacy long reviewer guide with a compact reference."""
+    description = description or ""
+    marker = "## Reviewer Quick Actions"
+    idx = description.find(marker)
+    if idx == -1:
+        return description
+    prefix = description[:idx].rstrip()
+    if not prefix:
+        return COMPACT_REVIEWER_GUIDE_NOTE + "\n"
+    return prefix + "\n\n" + COMPACT_REVIEWER_GUIDE_NOTE + "\n"
+
+
+def _append_reviewer_guide_note(description: str) -> str:
+    """Append the compact reviewer guide note if not already present."""
+    description = description or ""
+    if COMPACT_REVIEWER_GUIDE_NOTE in description:
+        return description
+    if description.endswith("\n"):
+        return description + COMPACT_REVIEWER_GUIDE_NOTE + "\n"
+    return description + "\n\n" + COMPACT_REVIEWER_GUIDE_NOTE + "\n"
+
+
 def _remove_marked_block(description: str, start_marker: str, end_marker: str) -> str:
     description = description or ""
     pattern = re.compile(
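A quick illustration of what _compact_reviewer_guide does to a legacy description. The function body is copied from the hunk above; the sample description is hypothetical:

COMPACT_REVIEWER_GUIDE_NOTE = "> 📋 Full **reviewer guide** is posted in a dedicated PR thread."

def _compact_reviewer_guide(description: str) -> str:
    description = description or ""
    marker = "## Reviewer Quick Actions"
    idx = description.find(marker)
    if idx == -1:
        return description
    prefix = description[:idx].rstrip()
    if not prefix:
        return COMPACT_REVIEWER_GUIDE_NOTE + "\n"
    return prefix + "\n\n" + COMPACT_REVIEWER_GUIDE_NOTE + "\n"

legacy = "Rolling drift PR — backup pipeline\n\n## Reviewer Quick Actions\n\n### 1) Accept all changes\n"
assert _compact_reviewer_guide(legacy) == (
    "Rolling drift PR — backup pipeline\n\n"
    "> 📋 Full **reviewer guide** is posted in a dedicated PR thread.\n"
)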
@@ -2273,6 +2304,185 @@ def _sync_full_ai_review_thread(
     return True


+def _deterministic_thread_marker(workload: str) -> str:
+    return f"Automation marker: {AUTO_DETERMINISTIC_THREAD_PREFIX}{workload.strip().lower()}"
+
+
+def _build_full_deterministic_thread_content(workload: str, deterministic_summary: str) -> str:
+    marker = _deterministic_thread_marker(workload)
+    return (
+        "Automated review summary (full)\n\n"
+        "PR description uses a compact review summary because of Azure DevOps description size limits.\n\n"
+        f"{deterministic_summary}\n\n"
+        f"{marker}"
+    ).strip()
+
+
+def _create_deterministic_thread(
+    repo_api: str,
+    pr_id: int,
+    token: str,
+    workload: str,
+    deterministic_summary: str,
+) -> None:
+    content = _build_full_deterministic_thread_content(workload, deterministic_summary)
+    _request_json(
+        f"{repo_api}/pullrequests/{pr_id}/threads?api-version=7.1",
+        token=token,
+        method="POST",
+        body={
+            "comments": [
+                {
+                    "parentCommentId": 0,
+                    "content": content,
+                    "commentType": 1,
+                }
+            ],
+            "status": THREAD_STATUS_ACTIVE,
+        },
+    )
+
+
+def _sync_deterministic_thread(
+    repo_api: str,
+    pr_id: int,
+    token: str,
+    workload: str,
+    deterministic_summary: str,
+) -> bool:
+    marker = _deterministic_thread_marker(workload)
+    desired_content = _build_full_deterministic_thread_content(workload, deterministic_summary)
+    threads_payload = _request_json(
+        f"{repo_api}/pullrequests/{pr_id}/threads?api-version=7.1",
+        token=token,
+    )
+    threads = threads_payload.get("value", []) if isinstance(threads_payload, dict) else []
+    thread = _find_marked_thread(threads, marker)
+    if thread is None:
+        _create_deterministic_thread(repo_api, pr_id, token, workload, deterministic_summary)
+        return True
+
+    comments = thread.get("comments", []) if isinstance(thread.get("comments"), list) else []
+    if _thread_has_matching_comment(comments, desired_content):
+        return False
+
+    thread_id = _thread_id(thread)
+    if thread_id <= 0:
+        _create_deterministic_thread(repo_api, pr_id, token, workload, deterministic_summary)
+        return True
+
+    if _is_thread_resolved(thread):
+        _set_thread_status(repo_api, pr_id, thread_id, token, THREAD_STATUS_ACTIVE)
+    _add_thread_comment(repo_api, pr_id, thread_id, token, desired_content)
+    return True
+
+
+def _close_deterministic_thread(
+    repo_api: str,
+    pr_id: int,
+    token: str,
+    workload: str,
+) -> bool:
+    marker = _deterministic_thread_marker(workload)
+    threads_payload = _request_json(
+        f"{repo_api}/pullrequests/{pr_id}/threads?api-version=7.1",
+        token=token,
+    )
+    threads = threads_payload.get("value", []) if isinstance(threads_payload, dict) else []
+    thread = _find_marked_thread(threads, marker)
+    if thread is None:
+        return False
+    thread_id = _thread_id(thread)
+    if thread_id <= 0:
+        return False
+    if _is_thread_resolved(thread):
+        return False
+    _set_thread_status(repo_api, pr_id, thread_id, token, THREAD_STATUS_CLOSED)
+    return True
+
+
+def _reviewer_guide_thread_marker(workload: str) -> str:
+    return f"Automation marker: {AUTO_REVIEWER_GUIDE_THREAD_PREFIX}{workload.strip().lower()}"
+
+
+def _build_full_reviewer_guide_thread_content(workload: str) -> str:
+    marker = _reviewer_guide_thread_marker(workload)
+    return (
+        "## Reviewer Quick Actions\n\n"
+        "### 1) Accept all changes\n"
+        "- Merge PR to accept drift into baseline.\n\n"
+        "### 2) Reject whole PR and revert\n"
+        "- Set reviewer vote to **Reject**.\n"
+        "- Abandon PR.\n"
+        "- Auto-remediation queues restore (if `AUTO_REMEDIATE_ON_PR_REJECTION=true`).\n\n"
+        "### 3) Reject only selected policy changes\n"
+        "- In each `Change Needed` policy thread, comment `/reject` for changes you do not want.\n"
+        "- Optional: use `/accept` for changes you want to keep.\n"
+        "- Wait for review-sync pipeline (about 5 minutes) to update PR diff.\n"
+        "- Merge remaining accepted changes.\n"
+        "- Post-merge auto-remediation queues restore to reconcile tenant to merged baseline "
+        "(if `AUTO_REMEDIATE_AFTER_MERGE=true`).\n\n"
+        f"{marker}"
+    ).strip()
+
+
+def _create_reviewer_guide_thread(
+    repo_api: str,
+    pr_id: int,
+    token: str,
+    workload: str,
+) -> None:
+    content = _build_full_reviewer_guide_thread_content(workload)
+    _request_json(
+        f"{repo_api}/pullrequests/{pr_id}/threads?api-version=7.1",
+        token=token,
+        method="POST",
+        body={
+            "comments": [
+                {
+                    "parentCommentId": 0,
+                    "content": content,
+                    "commentType": 1,
+                }
+            ],
+            "status": THREAD_STATUS_ACTIVE,
+        },
+    )
+
+
+def _sync_reviewer_guide_thread(
+    repo_api: str,
+    pr_id: int,
+    token: str,
+    workload: str,
+) -> bool:
+    marker = _reviewer_guide_thread_marker(workload)
+    desired_content = _build_full_reviewer_guide_thread_content(workload)
+    threads_payload = _request_json(
+        f"{repo_api}/pullrequests/{pr_id}/threads?api-version=7.1",
+        token=token,
+    )
+    threads = threads_payload.get("value", []) if isinstance(threads_payload, dict) else []
+    thread = _find_marked_thread(threads, marker)
+    if thread is None:
+        _create_reviewer_guide_thread(repo_api, pr_id, token, workload)
+        return True
+
+    comments = thread.get("comments", []) if isinstance(thread.get("comments"), list) else []
+    if _thread_has_matching_comment(comments, desired_content):
+        return False
+
+    thread_id = _thread_id(thread)
+    if thread_id <= 0:
+        _create_reviewer_guide_thread(repo_api, pr_id, token, workload)
+        return True
+
+    if _is_thread_resolved(thread):
+        _set_thread_status(repo_api, pr_id, thread_id, token, THREAD_STATUS_ACTIVE)
+    _add_thread_comment(repo_api, pr_id, thread_id, token, desired_content)
+    return True
+
+
 def _set_thread_status(
     repo_api: str,
     pr_id: int,
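All of these thread helpers lean on one idempotency trick: every generated comment ends with an "Automation marker: …" line, so a later run can find its own thread and choose between create, refresh, and skip. The helpers _find_marked_thread and _thread_has_matching_comment are not shown in this diff; a minimal sketch of what they plausibly do, under that assumption:

from typing import Any

def _find_marked_thread(threads: list[dict[str, Any]], marker: str) -> dict[str, Any] | None:
    # Assumed behavior: first thread whose first comment carries the marker string.
    for thread in threads:
        comments = thread.get("comments") or []
        if comments and marker in (comments[0].get("content") or ""):
            return thread
    return None

def _thread_has_matching_comment(comments: list[dict[str, Any]], desired: str) -> bool:
    # Assumed behavior: skip the update when the desired content is already posted.
    return any((c.get("content") or "").strip() == desired.strip() for c in comments)

threads = [{"id": 7, "comments": [{"content": "...\n\nAutomation marker: AUTO-REVIEWER-GUIDE:entra"}]}]
assert _find_marked_thread(threads, "Automation marker: AUTO-REVIEWER-GUIDE:entra")["id"] == 7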
@@ -2530,12 +2740,16 @@ def main() -> int:
     )

     full_pr = _request_json(f"{repo_api}/pullrequests/{pr_id}?api-version=7.1", token=token)
-    current_description = full_pr.get("description", "")
+    current_description = full_pr.get("description") or ""
     pr_is_draft = bool(full_pr.get("isDraft"))
     existing_fingerprint = _existing_change_fingerprint(current_description)
     existing_summary_version = _existing_summary_version(current_description)
     current_auto_body = _auto_block_body(current_description)
-    deterministic_already_present = deterministic in current_auto_body if current_auto_body else False
+    compact_deterministic = _compact_deterministic_summary(deterministic)
+    deterministic_already_present = (
+        (deterministic in current_auto_body)
+        or (compact_deterministic in current_auto_body)
+    ) if current_auto_body else False
     ai_fallback_in_current_block = _auto_block_contains_ai_fallback(current_auto_body)
     refresh_on_fallback = _env_bool("PR_AI_FORCE_REFRESH_ON_FALLBACK", default=True)
     if existing_fingerprint and existing_fingerprint == changes_fingerprint:
@@ -2549,7 +2763,7 @@ def main() -> int:
                 repo_api=repo_api,
                 token=token,
                 pr_id=int(pr_id),
-                title=full_pr.get("title", pr.get("title", f"{args.workload} drift review (rolling)")),
+                title=full_pr.get("title") or pr.get("title") or f"{args.workload} drift review (rolling)",
                 description=current_description,
                 is_draft=pr_is_draft,
             )
@@ -2625,29 +2839,44 @@ def main() -> int:
     updated_description = _upsert_auto_block(current_description, auto_block)
     # Cleanup legacy description-based ticket checklist if present.
     updated_description = _remove_marked_block(updated_description, TICKET_BLOCK_START, TICKET_BLOCK_END)
+    # Strip legacy long reviewer guide and ensure compact note is present.
+    updated_description = _compact_reviewer_guide(updated_description)
+    updated_description = _append_reviewer_guide_note(updated_description)

     patch_url = f"{repo_api}/pullrequests/{pr_id}?api-version=7.1"
-    patch_title = full_pr.get("title", pr.get("title", f"{args.workload} drift review (rolling)"))
+    patch_title = full_pr.get("title") or pr.get("title") or f"{args.workload} drift review (rolling)"
     summary_updated = False
     final_description = current_description
     description_compacted = False
+    print(
+        f"DEBUG summary: pr_id={pr_id} workload={args.workload} "
+        f"status={full_pr.get('status')} isDraft={full_pr.get('isDraft')} "
+        f"mergeStatus={full_pr.get('mergeStatus')} title_len={len(patch_title)} "
+        f"current_desc_len={len(current_description or '')} updated_desc_len={len(updated_description or '')}"
+    )
+    # Proactively compact if we are near the Azure DevOps PR description limit.
+    if len(updated_description) > (ADO_PR_DESCRIPTION_MAX_LEN - 100):
+        description_compacted = True

-    if updated_description != current_description:
-        try:
-            _request_json(
-                patch_url,
-                token=token,
-                method="PATCH",
-                body={
-                    "title": patch_title,
-                    "description": updated_description,
-                },
-            )
-            summary_updated = True
-            final_description = updated_description
-        except RuntimeError as exc:
-            if not _is_description_limit_error(exc):
-                raise
-            description_compacted = True
+    if not description_compacted:
+        try:
+            _request_json(
+                patch_url,
+                token=token,
+                method="PATCH",
+                body={
+                    "title": patch_title,
+                    "description": updated_description,
+                },
+            )
+            summary_updated = True
+            final_description = updated_description
+        except RuntimeError as exc:
+            if not _is_description_limit_error(exc):
+                raise
+            description_compacted = True
     if description_compacted:
         compact_ai_block = ""
         if ai_summary:
             compact_ai_block = "\n### AI Reviewer Narrative\n" + COMPACT_AI_THREAD_NOTE
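The net effect of the compaction flow: descriptions near the 4000-character Azure DevOps limit are swapped for a compact block up front, instead of waiting for the PATCH to fail. The threshold check in isolation, with the constant from the hunk above:

ADO_PR_DESCRIPTION_MAX_LEN = 4000

def needs_compacting(updated_description: str) -> bool:
    # 100-character safety margin, as in the hunk above
    return len(updated_description) > (ADO_PR_DESCRIPTION_MAX_LEN - 100)

assert not needs_compacting("x" * 3900)
assert needs_compacting("x" * 3901)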
@@ -2660,6 +2889,8 @@ def main() -> int:
                 "",
                 f"- **Summary Version:** `{AUTO_SUMMARY_VERSION}`",
                 _compact_deterministic_summary(deterministic),
+                "",
+                COMPACT_DETERMINISTIC_THREAD_NOTE,
                 compact_ai_block,
                 AUTO_BLOCK_END,
             ]
@@ -2670,10 +2901,11 @@ def main() -> int:
             )
             if compact_description == updated_description:
                 raise
-            print(
-                "WARNING: Full PR summary update failed; retrying with compact summary block. "
-                f"Reason: {exc}"
-            )
+        if not summary_updated:
+            print(
+                "INFO: Full PR summary exceeds Azure DevOps description limit; "
+                "using compact summary in description and posting full details to a PR thread."
+            )
             try:
                 _request_json(
                     patch_url,
@@ -2697,6 +2929,7 @@ def main() -> int:
                 f"- **Summary Version:** `{AUTO_SUMMARY_VERSION}`",
                 _compact_deterministic_summary(deterministic),
                 "",
+                COMPACT_DETERMINISTIC_THREAD_NOTE,
                 COMPACT_AI_THREAD_NOTE,
                 AUTO_BLOCK_END,
             ]
@@ -2720,6 +2953,34 @@ def main() -> int:
     else:
         final_description = updated_description

+    if description_compacted:
+        try:
+            thread_updated = _sync_deterministic_thread(
+                repo_api=repo_api,
+                pr_id=int(pr_id),
+                token=token,
+                workload=args.workload,
+                deterministic_summary=deterministic,
+            )
+            if thread_updated:
+                print(f"Updated full deterministic summary thread for PR #{pr_id} ({args.workload}).")
+            else:
+                print(f"Full deterministic summary thread already up to date for PR #{pr_id} ({args.workload}).")
+        except Exception as exc:
+            print(f"WARNING: Failed to sync full deterministic summary thread for PR #{pr_id}: {exc}")
+    else:
+        try:
+            closed = _close_deterministic_thread(
+                repo_api=repo_api,
+                pr_id=int(pr_id),
+                token=token,
+                workload=args.workload,
+            )
+            if closed:
+                print(f"Closed full deterministic summary thread for PR #{pr_id} ({args.workload}) because description now fits.")
+        except Exception as exc:
+            print(f"WARNING: Failed to close deterministic summary thread for PR #{pr_id}: {exc}")
+
     if summary_updated:
         print(f"Updated automated review summary for PR #{pr_id} ({args.workload}).")
     else:
@@ -2739,6 +3000,19 @@ def main() -> int:
                 print(f"Full AI reviewer narrative thread already up to date for PR #{pr_id} ({args.workload}).")
         except Exception as exc:
             print(f"WARNING: Failed to sync full AI reviewer narrative thread for PR #{pr_id}: {exc}")
+        try:
+            guide_updated = _sync_reviewer_guide_thread(
+                repo_api=repo_api,
+                pr_id=int(pr_id),
+                token=token,
+                workload=args.workload,
+            )
+            if guide_updated:
+                print(f"Updated reviewer guide thread for PR #{pr_id} ({args.workload}).")
+            else:
+                print(f"Reviewer guide thread already up to date for PR #{pr_id} ({args.workload}).")
+        except Exception as exc:
+            print(f"WARNING: Failed to sync reviewer guide thread for PR #{pr_id}: {exc}")
     if _publish_draft_pr(
         repo_api=repo_api,
         token=token,